Merge branch 'nasm-2.14.xx'
[nasm.git] / asm / assemble.c
blobda58d1284749be25558a94c9c8154bcfdbf3dd5c
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2018 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * Bytecode specification
38 * ----------------------
41 * Codes Mnemonic Explanation
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
98 * [p0] pp = 0 for no prefix
99 * [60] pp = 1 for legacy prefix 60
100 * [f3] pp = 2
101 * [f2] pp = 3
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
119 * [w0] ww = 0 for W = 0
120 * [w1] ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
148 * disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes an YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes an ZMM VSIB memory EA
178 #include "compiler.h"
180 #include <stdio.h>
181 #include <string.h>
182 #include <stdlib.h>
184 #include "nasm.h"
185 #include "nasmlib.h"
186 #include "error.h"
187 #include "assemble.h"
188 #include "insns.h"
189 #include "tables.h"
190 #include "disp8.h"
191 #include "listing.h"
193 enum match_result {
195 * Matching errors. These should be sorted so that more specific
196 * errors come later in the sequence.
198 MERR_INVALOP,
199 MERR_OPSIZEMISSING,
200 MERR_OPSIZEMISMATCH,
201 MERR_BRNOTHERE,
202 MERR_BRNUMMISMATCH,
203 MERR_MASKNOTHERE,
204 MERR_DECONOTHERE,
205 MERR_BADCPU,
206 MERR_BADMODE,
207 MERR_BADHLE,
208 MERR_ENCMISMATCH,
209 MERR_BADBND,
210 MERR_BADREPNE,
211 MERR_REGSETSIZE,
212 MERR_REGSET,
214 * Matching success; the conditional ones first
216 MOK_JUMP, /* Matching OK but needs jmp_match() */
217 MOK_GOOD /* Matching unconditionally OK */
220 typedef struct {
221 enum ea_type type; /* what kind of EA is this? */
222 int sib_present; /* is a SIB byte necessary? */
223 int bytes; /* # of bytes of offset needed */
224 int size; /* lazy - this is sib+bytes+1 */
225 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
226 int8_t disp8; /* compressed displacement for EVEX */
227 } ea;
229 #define GEN_SIB(scale, index, base) \
230 (((scale) << 6) | ((index) << 3) | ((base)))
232 #define GEN_MODRM(mod, reg, rm) \
233 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
235 static int64_t calcsize(int32_t, int64_t, int, insn *,
236 const struct itemplate *);
237 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
238 static void gencode(struct out_data *data, insn *ins);
239 static enum match_result find_match(const struct itemplate **tempp,
240 insn *instruction,
241 int32_t segment, int64_t offset, int bits);
242 static enum match_result matches(const struct itemplate *, insn *, int bits);
243 static opflags_t regflag(const operand *);
244 static int32_t regval(const operand *);
245 static int rexflags(int, opflags_t, int);
246 static int op_rexflags(const operand *, int);
247 static int op_evexflags(const operand *, int, uint8_t);
248 static void add_asp(insn *, int);
250 static enum ea_type process_ea(operand *, ea *, int, int,
251 opflags_t, insn *, const char **);
253 static inline bool absolute_op(const struct operand *o)
255 return o->segment == NO_SEG && o->wrt == NO_SEG &&
256 !(o->opflags & OPFLAG_RELATIVE);
259 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
261 return ins->prefixes[pos] == prefix;
264 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
266 if (ins->prefixes[pos])
267 nasm_error(ERR_NONFATAL, "invalid %s prefix",
268 prefix_name(ins->prefixes[pos]));
271 static const char *size_name(int size)
273 switch (size) {
274 case 1:
275 return "byte";
276 case 2:
277 return "word";
278 case 4:
279 return "dword";
280 case 8:
281 return "qword";
282 case 10:
283 return "tword";
284 case 16:
285 return "oword";
286 case 32:
287 return "yword";
288 case 64:
289 return "zword";
290 default:
291 return "???";
295 static void warn_overflow(int size)
297 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
298 "%s data exceeds bounds", size_name(size));
301 static void warn_overflow_const(int64_t data, int size)
303 if (overflow_general(data, size))
304 warn_overflow(size);
307 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
309 bool err;
311 switch (sign) {
312 case OUT_WRAP:
313 err = overflow_general(data, size);
314 break;
315 case OUT_SIGNED:
316 err = overflow_signed(data, size);
317 break;
318 case OUT_UNSIGNED:
319 err = overflow_unsigned(data, size);
320 break;
321 default:
322 panic();
323 break;
326 if (err)
327 warn_overflow(size);
331 * This routine wrappers the real output format's output routine,
332 * in order to pass a copy of the data off to the listing file
333 * generator at the same time, flatten unnecessary relocations,
334 * and verify backend compatibility.
336 static void out(struct out_data *data)
338 static int32_t lineno = 0; /* static!!! */
339 static const char *lnfname = NULL;
340 union {
341 uint8_t b[8];
342 uint64_t q;
343 } xdata;
344 size_t asize, amax;
345 uint64_t zeropad = 0;
346 int64_t addrval;
347 int32_t fixseg; /* Segment for which to produce fixed data */
349 if (!data->size)
350 return; /* Nothing to do */
353 * Convert addresses to RAWDATA if possible
354 * XXX: not all backends want this for global symbols!!!!
356 switch (data->type) {
357 case OUT_ADDRESS:
358 addrval = data->toffset;
359 fixseg = NO_SEG; /* Absolute address is fixed data */
360 goto address;
362 case OUT_RELADDR:
363 addrval = data->toffset - data->relbase;
364 fixseg = data->segment; /* Our own segment is fixed data */
365 goto address;
367 address:
368 nasm_assert(data->size <= 8);
369 asize = data->size;
370 amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
371 if ((ofmt->flags & OFMT_KEEP_ADDR) == 0 && data->tsegment == fixseg &&
372 data->twrt == NO_SEG) {
373 warn_overflow_out(addrval, asize, data->sign);
374 xdata.q = cpu_to_le64(addrval);
375 data->data = xdata.b;
376 data->type = OUT_RAWDATA;
377 asize = amax = 0; /* No longer an address */
379 break;
381 case OUT_SEGMENT:
382 nasm_assert(data->size <= 8);
383 asize = data->size;
384 amax = 2;
385 break;
387 default:
388 asize = amax = 0; /* Not an address */
389 break;
393 * this call to src_get determines when we call the
394 * debug-format-specific "linenum" function
395 * it updates lineno and lnfname to the current values
396 * returning 0 if "same as last time", -2 if lnfname
397 * changed, and the amount by which lineno changed,
398 * if it did. thus, these variables must be static
401 if (src_get(&lineno, &lnfname))
402 dfmt->linenum(lnfname, lineno, data->segment);
404 if (asize > amax) {
405 if (data->type == OUT_RELADDR || data->sign == OUT_SIGNED) {
406 nasm_error(ERR_NONFATAL,
407 "%u-bit signed relocation unsupported by output format %s",
408 (unsigned int)(asize << 3), ofmt->shortname);
409 } else {
410 nasm_error(ERR_WARNING | ERR_WARN_ZEXTRELOC,
411 "%u-bit %s relocation zero-extended from %u bits",
412 (unsigned int)(asize << 3),
413 data->type == OUT_SEGMENT ? "segment" : "unsigned",
414 (unsigned int)(amax << 3));
416 zeropad = data->size - amax;
417 data->size = amax;
419 lfmt->output(data);
421 if (likely(data->segment != NO_SEG)) {
422 ofmt->output(data);
423 } else {
424 /* Outputting to ABSOLUTE section - only reserve is permitted */
425 if (data->type != OUT_RESERVE) {
426 nasm_error(ERR_NONFATAL, "attempt to assemble code in [ABSOLUTE]"
427 " space");
429 /* No need to push to the backend */
432 data->offset += data->size;
433 data->insoffs += data->size;
435 if (zeropad) {
436 data->type = OUT_ZERODATA;
437 data->size = zeropad;
438 lfmt->output(data);
439 ofmt->output(data);
440 data->offset += zeropad;
441 data->insoffs += zeropad;
442 data->size += zeropad; /* Restore original size value */
446 static inline void out_rawdata(struct out_data *data, const void *rawdata,
447 size_t size)
449 data->type = OUT_RAWDATA;
450 data->data = rawdata;
451 data->size = size;
452 out(data);
455 static void out_rawbyte(struct out_data *data, uint8_t byte)
457 data->type = OUT_RAWDATA;
458 data->data = &byte;
459 data->size = 1;
460 out(data);
463 static inline void out_reserve(struct out_data *data, uint64_t size)
465 data->type = OUT_RESERVE;
466 data->size = size;
467 out(data);
470 static void out_segment(struct out_data *data, const struct operand *opx)
472 if (opx->opflags & OPFLAG_RELATIVE)
473 nasm_error(ERR_NONFATAL, "segment references cannot be relative");
475 data->type = OUT_SEGMENT;
476 data->sign = OUT_UNSIGNED;
477 data->size = 2;
478 data->toffset = opx->offset;
479 data->tsegment = ofmt->segbase(opx->segment | 1);
480 data->twrt = opx->wrt;
481 out(data);
484 static void out_imm(struct out_data *data, const struct operand *opx,
485 int size, enum out_sign sign)
487 if (opx->segment != NO_SEG && (opx->segment & 1)) {
489 * This is actually a segment reference, but eval() has
490 * already called ofmt->segbase() for us. Sigh.
492 if (size < 2)
493 nasm_error(ERR_NONFATAL, "segment reference must be 16 bits");
495 data->type = OUT_SEGMENT;
496 } else {
497 data->type = (opx->opflags & OPFLAG_RELATIVE)
498 ? OUT_RELADDR : OUT_ADDRESS;
500 data->sign = sign;
501 data->toffset = opx->offset;
502 data->tsegment = opx->segment;
503 data->twrt = opx->wrt;
505 * XXX: improve this if at some point in the future we can
506 * distinguish the subtrahend in expressions like [foo - bar]
507 * where bar is a symbol in the current segment. However, at the
508 * current point, if OPFLAG_RELATIVE is set that subtraction has
509 * already occurred.
511 data->relbase = 0;
512 data->size = size;
513 out(data);
516 static void out_reladdr(struct out_data *data, const struct operand *opx,
517 int size)
519 if (opx->opflags & OPFLAG_RELATIVE)
520 nasm_error(ERR_NONFATAL, "invalid use of self-relative expression");
522 data->type = OUT_RELADDR;
523 data->sign = OUT_SIGNED;
524 data->size = size;
525 data->toffset = opx->offset;
526 data->tsegment = opx->segment;
527 data->twrt = opx->wrt;
528 data->relbase = data->offset + (data->inslen - data->insoffs);
529 out(data);
532 static bool jmp_match(int32_t segment, int64_t offset, int bits,
533 insn * ins, const struct itemplate *temp)
535 int64_t isize;
536 const uint8_t *code = temp->code;
537 uint8_t c = code[0];
538 bool is_byte;
540 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
541 return false;
542 if (!optimizing.level || (optimizing.flag & OPTIM_DISABLE_JMP_MATCH))
543 return false;
544 if (optimizing.level < 0 && c == 0371)
545 return false;
547 isize = calcsize(segment, offset, bits, ins, temp);
549 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
550 /* Be optimistic in pass 1 */
551 return true;
553 if (ins->oprs[0].segment != segment)
554 return false;
556 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
557 is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */
559 if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
560 /* jmp short (opcode eb) cannot be used with bnd prefix. */
561 ins->prefixes[PPS_REP] = P_none;
562 nasm_error(ERR_WARNING | ERR_WARN_BND | ERR_PASS2 ,
563 "jmp short does not init bnd regs - bnd prefix dropped.");
566 return is_byte;
569 /* This is totally just a wild guess what is reasonable... */
570 #define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
572 int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
574 struct out_data data;
575 const struct itemplate *temp;
576 enum match_result m;
577 int64_t wsize; /* size for DB etc. */
579 nasm_zero(data);
580 data.offset = start;
581 data.segment = segment;
582 data.itemp = NULL;
583 data.bits = bits;
585 wsize = db_bytes(instruction->opcode);
586 if (wsize == -1)
587 return 0;
589 if (wsize) {
590 extop *e;
592 list_for_each(e, instruction->eops) {
593 if (e->type == EOT_DB_NUMBER) {
594 if (wsize > 8) {
595 nasm_error(ERR_NONFATAL,
596 "integer supplied to a DT, DO, DY or DZ"
597 " instruction");
598 } else {
599 data.insoffs = 0;
600 data.inslen = data.size = wsize;
601 data.toffset = e->offset;
602 data.twrt = e->wrt;
603 data.relbase = 0;
604 if (e->segment != NO_SEG && (e->segment & 1)) {
605 data.tsegment = e->segment;
606 data.type = OUT_SEGMENT;
607 data.sign = OUT_UNSIGNED;
608 } else {
609 data.tsegment = e->segment;
610 data.type = e->relative ? OUT_RELADDR : OUT_ADDRESS;
611 data.sign = OUT_WRAP;
613 out(&data);
615 } else if (e->type == EOT_DB_STRING ||
616 e->type == EOT_DB_STRING_FREE) {
617 int align = e->stringlen % wsize;
618 if (align)
619 align = wsize - align;
621 data.insoffs = 0;
622 data.inslen = e->stringlen + align;
624 out_rawdata(&data, e->stringval, e->stringlen);
625 out_rawdata(&data, zero_buffer, align);
628 } else if (instruction->opcode == I_INCBIN) {
629 const char *fname = instruction->eops->stringval;
630 FILE *fp;
631 size_t t = instruction->times; /* INCBIN handles TIMES by itself */
632 off_t base = 0;
633 off_t len;
634 const void *map = NULL;
635 char *buf = NULL;
636 size_t blk = 0; /* Buffered I/O block size */
637 size_t m = 0; /* Bytes last read */
639 if (!t)
640 goto done;
642 fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
643 if (!fp) {
644 nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
645 fname);
646 goto done;
649 len = nasm_file_size(fp);
651 if (len == (off_t)-1) {
652 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
653 fname);
654 goto close_done;
657 if (instruction->eops->next) {
658 base = instruction->eops->next->offset;
659 if (base >= len) {
660 len = 0;
661 } else {
662 len -= base;
663 if (instruction->eops->next->next &&
664 len > (off_t)instruction->eops->next->next->offset)
665 len = (off_t)instruction->eops->next->next->offset;
669 lfmt->set_offset(data.offset);
670 lfmt->uplevel(LIST_INCBIN);
672 if (!len)
673 goto end_incbin;
675 /* Try to map file data */
676 map = nasm_map_file(fp, base, len);
677 if (!map) {
678 blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
679 buf = nasm_malloc(blk);
682 while (t--) {
684 * Consider these irrelevant for INCBIN, since it is fully
685 * possible that these might be (way) bigger than an int
686 * can hold; there is, however, no reason to widen these
687 * types just for INCBIN. data.inslen == 0 signals to the
688 * backend that these fields are meaningless, if at all
689 * needed.
691 data.insoffs = 0;
692 data.inslen = 0;
694 if (map) {
695 out_rawdata(&data, map, len);
696 } else if ((off_t)m == len) {
697 out_rawdata(&data, buf, len);
698 } else {
699 off_t l = len;
701 if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
702 nasm_error(ERR_NONFATAL,
703 "`incbin': unable to seek on file `%s'",
704 fname);
705 goto end_incbin;
707 while (l > 0) {
708 m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
709 if (!m || feof(fp)) {
711 * This shouldn't happen unless the file
712 * actually changes while we are reading
713 * it.
715 nasm_error(ERR_NONFATAL,
716 "`incbin': unexpected EOF while"
717 " reading file `%s'", fname);
718 goto end_incbin;
720 out_rawdata(&data, buf, m);
721 l -= m;
725 end_incbin:
726 lfmt->downlevel(LIST_INCBIN);
727 if (instruction->times > 1) {
728 lfmt->uplevel(LIST_TIMES);
729 lfmt->downlevel(LIST_TIMES);
731 if (ferror(fp)) {
732 nasm_error(ERR_NONFATAL,
733 "`incbin': error while"
734 " reading file `%s'", fname);
736 close_done:
737 if (buf)
738 nasm_free(buf);
739 if (map)
740 nasm_unmap_file(map, len);
741 fclose(fp);
742 done:
743 instruction->times = 1; /* Tell the upper layer not to iterate */
745 } else {
746 /* "Real" instruction */
748 /* Check to see if we need an address-size prefix */
749 add_asp(instruction, bits);
751 m = find_match(&temp, instruction, data.segment, data.offset, bits);
753 if (m == MOK_GOOD) {
754 /* Matches! */
755 int64_t insn_size = calcsize(data.segment, data.offset,
756 bits, instruction, temp);
757 nasm_assert(insn_size >= 0);
759 data.itemp = temp;
760 data.bits = bits;
761 data.insoffs = 0;
762 data.inslen = insn_size;
764 gencode(&data, instruction);
765 nasm_assert(data.insoffs == insn_size);
766 } else {
767 /* No match */
768 switch (m) {
769 case MERR_OPSIZEMISSING:
770 nasm_error(ERR_NONFATAL, "operation size not specified");
771 break;
772 case MERR_OPSIZEMISMATCH:
773 nasm_error(ERR_NONFATAL, "mismatch in operand sizes");
774 break;
775 case MERR_BRNOTHERE:
776 nasm_error(ERR_NONFATAL,
777 "broadcast not permitted on this operand");
778 break;
779 case MERR_BRNUMMISMATCH:
780 nasm_error(ERR_NONFATAL,
781 "mismatch in the number of broadcasting elements");
782 break;
783 case MERR_MASKNOTHERE:
784 nasm_error(ERR_NONFATAL,
785 "mask not permitted on this operand");
786 break;
787 case MERR_DECONOTHERE:
788 nasm_error(ERR_NONFATAL, "unsupported mode decorator for instruction");
789 break;
790 case MERR_BADCPU:
791 nasm_error(ERR_NONFATAL, "no instruction for this cpu level");
792 break;
793 case MERR_BADMODE:
794 nasm_error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
795 bits);
796 break;
797 case MERR_ENCMISMATCH:
798 nasm_error(ERR_NONFATAL, "specific encoding scheme not available");
799 break;
800 case MERR_BADBND:
801 nasm_error(ERR_NONFATAL, "bnd prefix is not allowed");
802 break;
803 case MERR_BADREPNE:
804 nasm_error(ERR_NONFATAL, "%s prefix is not allowed",
805 (has_prefix(instruction, PPS_REP, P_REPNE) ?
806 "repne" : "repnz"));
807 break;
808 case MERR_REGSETSIZE:
809 nasm_error(ERR_NONFATAL, "invalid register set size");
810 break;
811 case MERR_REGSET:
812 nasm_error(ERR_NONFATAL, "register set not valid for operand");
813 break;
814 default:
815 nasm_error(ERR_NONFATAL,
816 "invalid combination of opcode and operands");
817 break;
820 instruction->times = 1; /* Avoid repeated error messages */
823 return data.offset - start;
826 int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
828 const struct itemplate *temp;
829 enum match_result m;
831 if (instruction->opcode == I_none)
832 return 0;
834 if (opcode_is_db(instruction->opcode)) {
835 extop *e;
836 int32_t isize, osize, wsize;
838 isize = 0;
839 wsize = db_bytes(instruction->opcode);
840 nasm_assert(wsize > 0);
842 list_for_each(e, instruction->eops) {
843 int32_t align;
845 osize = 0;
846 if (e->type == EOT_DB_NUMBER) {
847 osize = 1;
848 warn_overflow_const(e->offset, wsize);
849 } else if (e->type == EOT_DB_STRING ||
850 e->type == EOT_DB_STRING_FREE)
851 osize = e->stringlen;
853 align = (-osize) % wsize;
854 if (align < 0)
855 align += wsize;
856 isize += osize + align;
858 return isize;
861 if (instruction->opcode == I_INCBIN) {
862 const char *fname = instruction->eops->stringval;
863 off_t len;
865 len = nasm_file_size_by_path(fname);
866 if (len == (off_t)-1) {
867 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
868 fname);
869 return 0;
872 if (instruction->eops->next) {
873 if (len <= (off_t)instruction->eops->next->offset) {
874 len = 0;
875 } else {
876 len -= instruction->eops->next->offset;
877 if (instruction->eops->next->next &&
878 len > (off_t)instruction->eops->next->next->offset) {
879 len = (off_t)instruction->eops->next->next->offset;
884 len *= instruction->times;
885 instruction->times = 1; /* Tell the upper layer to not iterate */
887 return len;
890 /* Check to see if we need an address-size prefix */
891 add_asp(instruction, bits);
893 m = find_match(&temp, instruction, segment, offset, bits);
894 if (m == MOK_GOOD) {
895 /* we've matched an instruction. */
896 return calcsize(segment, offset, bits, instruction, temp);
897 } else {
898 return -1; /* didn't match any instruction */
902 static void bad_hle_warn(const insn * ins, uint8_t hleok)
904 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
905 enum whatwarn { w_none, w_lock, w_inval } ww;
906 static const enum whatwarn warn[2][4] =
908 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
909 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
911 unsigned int n;
913 n = (unsigned int)rep_pfx - P_XACQUIRE;
914 if (n > 1)
915 return; /* Not XACQUIRE/XRELEASE */
917 ww = warn[n][hleok];
918 if (!is_class(MEMORY, ins->oprs[0].type))
919 ww = w_inval; /* HLE requires operand 0 to be memory */
921 switch (ww) {
922 case w_none:
923 break;
925 case w_lock:
926 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
927 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
928 "%s with this instruction requires lock",
929 prefix_name(rep_pfx));
931 break;
933 case w_inval:
934 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
935 "%s invalid with this instruction",
936 prefix_name(rep_pfx));
937 break;
941 /* Common construct */
942 #define case3(x) case (x): case (x)+1: case (x)+2
943 #define case4(x) case3(x): case (x)+3
945 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
946 insn * ins, const struct itemplate *temp)
948 const uint8_t *codes = temp->code;
949 int64_t length = 0;
950 uint8_t c;
951 int rex_mask = ~0;
952 int op1, op2;
953 struct operand *opx;
954 uint8_t opex = 0;
955 enum ea_type eat;
956 uint8_t hleok = 0;
957 bool lockcheck = true;
958 enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
959 const char *errmsg;
961 ins->rex = 0; /* Ensure REX is reset */
962 eat = EA_SCALAR; /* Expect a scalar EA */
963 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
965 if (ins->prefixes[PPS_OSIZE] == P_O64)
966 ins->rex |= REX_W;
968 (void)segment; /* Don't warn that this parameter is unused */
969 (void)offset; /* Don't warn that this parameter is unused */
971 while (*codes) {
972 c = *codes++;
973 op1 = (c & 3) + ((opex & 1) << 2);
974 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
975 opx = &ins->oprs[op1];
976 opex = 0; /* For the next iteration */
978 switch (c) {
979 case4(01):
980 codes += c, length += c;
981 break;
983 case3(05):
984 opex = c;
985 break;
987 case4(010):
988 ins->rex |=
989 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
990 codes++, length++;
991 break;
993 case4(014):
994 /* this is an index reg of MIB operand */
995 mib_index = opx->basereg;
996 break;
998 case4(020):
999 case4(024):
1000 length++;
1001 break;
1003 case4(030):
1004 length += 2;
1005 break;
1007 case4(034):
1008 if (opx->type & (BITS16 | BITS32 | BITS64))
1009 length += (opx->type & BITS16) ? 2 : 4;
1010 else
1011 length += (bits == 16) ? 2 : 4;
1012 break;
1014 case4(040):
1015 length += 4;
1016 break;
1018 case4(044):
1019 length += ins->addr_size >> 3;
1020 break;
1022 case4(050):
1023 length++;
1024 break;
1026 case4(054):
1027 length += 8; /* MOV reg64/imm */
1028 break;
1030 case4(060):
1031 length += 2;
1032 break;
1034 case4(064):
1035 if (opx->type & (BITS16 | BITS32 | BITS64))
1036 length += (opx->type & BITS16) ? 2 : 4;
1037 else
1038 length += (bits == 16) ? 2 : 4;
1039 break;
1041 case4(070):
1042 length += 4;
1043 break;
1045 case4(074):
1046 length += 2;
1047 break;
1049 case 0172:
1050 case 0173:
1051 codes++;
1052 length++;
1053 break;
1055 case4(0174):
1056 length++;
1057 break;
1059 case4(0240):
1060 ins->rex |= REX_EV;
1061 ins->vexreg = regval(opx);
1062 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
1063 ins->vex_cm = *codes++;
1064 ins->vex_wlp = *codes++;
1065 ins->evex_tuple = (*codes++ - 0300);
1066 break;
1068 case 0250:
1069 ins->rex |= REX_EV;
1070 ins->vexreg = 0;
1071 ins->vex_cm = *codes++;
1072 ins->vex_wlp = *codes++;
1073 ins->evex_tuple = (*codes++ - 0300);
1074 break;
1076 case4(0254):
1077 length += 4;
1078 break;
1080 case4(0260):
1081 ins->rex |= REX_V;
1082 ins->vexreg = regval(opx);
1083 ins->vex_cm = *codes++;
1084 ins->vex_wlp = *codes++;
1085 break;
1087 case 0270:
1088 ins->rex |= REX_V;
1089 ins->vexreg = 0;
1090 ins->vex_cm = *codes++;
1091 ins->vex_wlp = *codes++;
1092 break;
1094 case3(0271):
1095 hleok = c & 3;
1096 break;
1098 case4(0274):
1099 length++;
1100 break;
1102 case4(0300):
1103 break;
1105 case 0310:
1106 if (bits == 64)
1107 return -1;
1108 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
1109 break;
1111 case 0311:
1112 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1113 break;
1115 case 0312:
1116 break;
1118 case 0313:
1119 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1120 has_prefix(ins, PPS_ASIZE, P_A32))
1121 return -1;
1122 break;
1124 case4(0314):
1125 break;
1127 case 0320:
1129 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1130 if (pfx == P_O16)
1131 break;
1132 if (pfx != P_none)
1133 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1134 else
1135 ins->prefixes[PPS_OSIZE] = P_O16;
1136 break;
1139 case 0321:
1141 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1142 if (pfx == P_O32)
1143 break;
1144 if (pfx != P_none)
1145 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1146 else
1147 ins->prefixes[PPS_OSIZE] = P_O32;
1148 break;
1151 case 0322:
1152 break;
1154 case 0323:
1155 rex_mask &= ~REX_W;
1156 break;
1158 case 0324:
1159 ins->rex |= REX_W;
1160 break;
1162 case 0325:
1163 ins->rex |= REX_NH;
1164 break;
1166 case 0326:
1167 break;
1169 case 0330:
1170 codes++, length++;
1171 break;
1173 case 0331:
1174 break;
1176 case 0332:
1177 case 0333:
1178 length++;
1179 break;
1181 case 0334:
1182 ins->rex |= REX_L;
1183 break;
1185 case 0335:
1186 break;
1188 case 0336:
1189 if (!ins->prefixes[PPS_REP])
1190 ins->prefixes[PPS_REP] = P_REP;
1191 break;
1193 case 0337:
1194 if (!ins->prefixes[PPS_REP])
1195 ins->prefixes[PPS_REP] = P_REPNE;
1196 break;
1198 case 0340:
1199 if (!absolute_op(&ins->oprs[0]))
1200 nasm_error(ERR_NONFATAL, "attempt to reserve non-constant"
1201 " quantity of BSS space");
1202 else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
1203 nasm_error(ERR_WARNING | ERR_PASS1,
1204 "forward reference in RESx can have unpredictable results");
1205 else
1206 length += ins->oprs[0].offset;
1207 break;
1209 case 0341:
1210 if (!ins->prefixes[PPS_WAIT])
1211 ins->prefixes[PPS_WAIT] = P_WAIT;
1212 break;
1214 case 0360:
1215 break;
1217 case 0361:
1218 length++;
1219 break;
1221 case 0364:
1222 case 0365:
1223 break;
1225 case 0366:
1226 case 0367:
1227 length++;
1228 break;
1230 case 0370:
1231 case 0371:
1232 break;
1234 case 0373:
1235 length++;
1236 break;
1238 case 0374:
1239 eat = EA_XMMVSIB;
1240 break;
1242 case 0375:
1243 eat = EA_YMMVSIB;
1244 break;
1246 case 0376:
1247 eat = EA_ZMMVSIB;
1248 break;
1250 case4(0100):
1251 case4(0110):
1252 case4(0120):
1253 case4(0130):
1254 case4(0200):
1255 case4(0204):
1256 case4(0210):
1257 case4(0214):
1258 case4(0220):
1259 case4(0224):
1260 case4(0230):
1261 case4(0234):
1263 ea ea_data;
1264 int rfield;
1265 opflags_t rflags;
1266 struct operand *opy = &ins->oprs[op2];
1267 struct operand *op_er_sae;
1269 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1271 if (c <= 0177) {
1272 /* pick rfield from operand b (opx) */
1273 rflags = regflag(opx);
1274 rfield = nasm_regvals[opx->basereg];
1275 } else {
1276 rflags = 0;
1277 rfield = c & 7;
1280 /* EVEX.b1 : evex_brerop contains the operand position */
1281 op_er_sae = (ins->evex_brerop >= 0 ?
1282 &ins->oprs[ins->evex_brerop] : NULL);
1284 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1285 /* set EVEX.b */
1286 ins->evex_p[2] |= EVEX_P2B;
1287 if (op_er_sae->decoflags & ER) {
1288 /* set EVEX.RC (rounding control) */
1289 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1290 & EVEX_P2RC;
1292 } else {
1293 /* set EVEX.L'L (vector length) */
1294 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1295 ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
1296 if (opy->decoflags & BRDCAST_MASK) {
1297 /* set EVEX.b */
1298 ins->evex_p[2] |= EVEX_P2B;
1302 if (itemp_has(temp, IF_MIB)) {
1303 opy->eaflags |= EAF_MIB;
1305 * if a separate form of MIB (ICC style) is used,
1306 * the index reg info is merged into mem operand
1308 if (mib_index != R_none) {
1309 opy->indexreg = mib_index;
1310 opy->scale = 1;
1311 opy->hintbase = mib_index;
1312 opy->hinttype = EAH_NOTBASE;
1316 if (process_ea(opy, &ea_data, bits,
1317 rfield, rflags, ins, &errmsg) != eat) {
1318 nasm_error(ERR_NONFATAL, "%s", errmsg);
1319 return -1;
1320 } else {
1321 ins->rex |= ea_data.rex;
1322 length += ea_data.size;
1325 break;
1327 default:
1328 nasm_panic("internal instruction table corrupt"
1329 ": instruction code \\%o (0x%02X) given", c, c);
1330 break;
1334 ins->rex &= rex_mask;
1336 if (ins->rex & REX_NH) {
1337 if (ins->rex & REX_H) {
1338 nasm_error(ERR_NONFATAL, "instruction cannot use high registers");
1339 return -1;
1341 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1344 switch (ins->prefixes[PPS_VEX]) {
1345 case P_EVEX:
1346 if (!(ins->rex & REX_EV))
1347 return -1;
1348 break;
1349 case P_VEX3:
1350 case P_VEX2:
1351 if (!(ins->rex & REX_V))
1352 return -1;
1353 break;
1354 default:
1355 break;
1358 if (ins->rex & (REX_V | REX_EV)) {
1359 int bad32 = REX_R|REX_W|REX_X|REX_B;
1361 if (ins->rex & REX_H) {
1362 nasm_error(ERR_NONFATAL, "cannot use high register in AVX instruction");
1363 return -1;
1365 switch (ins->vex_wlp & 060) {
1366 case 000:
1367 case 040:
1368 ins->rex &= ~REX_W;
1369 break;
1370 case 020:
1371 ins->rex |= REX_W;
1372 bad32 &= ~REX_W;
1373 break;
1374 case 060:
1375 /* Follow REX_W */
1376 break;
1379 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1380 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1381 return -1;
1382 } else if (!(ins->rex & REX_EV) &&
1383 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1384 nasm_error(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1385 return -1;
1387 if (ins->rex & REX_EV)
1388 length += 4;
1389 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1390 ins->prefixes[PPS_VEX] == P_VEX3)
1391 length += 3;
1392 else
1393 length += 2;
1394 } else if (ins->rex & REX_MASK) {
1395 if (ins->rex & REX_H) {
1396 nasm_error(ERR_NONFATAL, "cannot use high register in rex instruction");
1397 return -1;
1398 } else if (bits == 64) {
1399 length++;
1400 } else if ((ins->rex & REX_L) &&
1401 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1402 iflag_cpu_level_ok(&cpu, IF_X86_64)) {
1403 /* LOCK-as-REX.R */
1404 assert_no_prefix(ins, PPS_LOCK);
1405 lockcheck = false; /* Already errored, no need for warning */
1406 length++;
1407 } else {
1408 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1409 return -1;
1413 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1414 (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1415 nasm_error(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1416 "instruction is not lockable");
1419 bad_hle_warn(ins, hleok);
1422 * when BND prefix is set by DEFAULT directive,
1423 * BND prefix is added to every appropriate instruction line
1424 * unless it is overridden by NOBND prefix.
1426 if (globalbnd &&
1427 (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
1428 ins->prefixes[PPS_REP] = P_BND;
1431 * Add length of legacy prefixes
1433 length += emit_prefix(NULL, bits, ins);
1435 return length;
1438 static inline void emit_rex(struct out_data *data, insn *ins)
1440 if (data->bits == 64) {
1441 if ((ins->rex & REX_MASK) &&
1442 !(ins->rex & (REX_V | REX_EV)) &&
1443 !ins->rex_done) {
1444 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1445 out_rawbyte(data, rex);
1446 ins->rex_done = true;
1451 static int emit_prefix(struct out_data *data, const int bits, insn *ins)
1453 int bytes = 0;
1454 int j;
1456 for (j = 0; j < MAXPREFIX; j++) {
1457 uint8_t c = 0;
1458 switch (ins->prefixes[j]) {
1459 case P_WAIT:
1460 c = 0x9B;
1461 break;
1462 case P_LOCK:
1463 c = 0xF0;
1464 break;
1465 case P_REPNE:
1466 case P_REPNZ:
1467 case P_XACQUIRE:
1468 case P_BND:
1469 c = 0xF2;
1470 break;
1471 case P_REPE:
1472 case P_REPZ:
1473 case P_REP:
1474 case P_XRELEASE:
1475 c = 0xF3;
1476 break;
1477 case R_CS:
1478 if (bits == 64) {
1479 nasm_error(ERR_WARNING | ERR_PASS2,
1480 "cs segment base generated, but will be ignored in 64-bit mode");
1482 c = 0x2E;
1483 break;
1484 case R_DS:
1485 if (bits == 64) {
1486 nasm_error(ERR_WARNING | ERR_PASS2,
1487 "ds segment base generated, but will be ignored in 64-bit mode");
1489 c = 0x3E;
1490 break;
1491 case R_ES:
1492 if (bits == 64) {
1493 nasm_error(ERR_WARNING | ERR_PASS2,
1494 "es segment base generated, but will be ignored in 64-bit mode");
1496 c = 0x26;
1497 break;
1498 case R_FS:
1499 c = 0x64;
1500 break;
1501 case R_GS:
1502 c = 0x65;
1503 break;
1504 case R_SS:
1505 if (bits == 64) {
1506 nasm_error(ERR_WARNING | ERR_PASS2,
1507 "ss segment base generated, but will be ignored in 64-bit mode");
1509 c = 0x36;
1510 break;
1511 case R_SEGR6:
1512 case R_SEGR7:
1513 nasm_error(ERR_NONFATAL,
1514 "segr6 and segr7 cannot be used as prefixes");
1515 break;
1516 case P_A16:
1517 if (bits == 64) {
1518 nasm_error(ERR_NONFATAL,
1519 "16-bit addressing is not supported "
1520 "in 64-bit mode");
1521 } else if (bits != 16)
1522 c = 0x67;
1523 break;
1524 case P_A32:
1525 if (bits != 32)
1526 c = 0x67;
1527 break;
1528 case P_A64:
1529 if (bits != 64) {
1530 nasm_error(ERR_NONFATAL,
1531 "64-bit addressing is only supported "
1532 "in 64-bit mode");
1534 break;
1535 case P_ASP:
1536 c = 0x67;
1537 break;
1538 case P_O16:
1539 if (bits != 16)
1540 c = 0x66;
1541 break;
1542 case P_O32:
1543 if (bits == 16)
1544 c = 0x66;
1545 break;
1546 case P_O64:
1547 /* REX.W */
1548 break;
1549 case P_OSP:
1550 c = 0x66;
1551 break;
1552 case P_EVEX:
1553 case P_VEX3:
1554 case P_VEX2:
1555 case P_NOBND:
1556 case P_none:
1557 break;
1558 default:
1559 nasm_panic("invalid instruction prefix");
1561 if (c) {
1562 if (data)
1563 out_rawbyte(data, c);
1564 bytes++;
1567 return bytes;
1570 static void gencode(struct out_data *data, insn *ins)
1572 uint8_t c;
1573 uint8_t bytes[4];
1574 int64_t size;
1575 int op1, op2;
1576 struct operand *opx;
1577 const uint8_t *codes = data->itemp->code;
1578 uint8_t opex = 0;
1579 enum ea_type eat = EA_SCALAR;
1580 int r;
1581 const int bits = data->bits;
1582 const char *errmsg;
1584 ins->rex_done = false;
1586 emit_prefix(data, bits, ins);
1588 while (*codes) {
1589 c = *codes++;
1590 op1 = (c & 3) + ((opex & 1) << 2);
1591 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1592 opx = &ins->oprs[op1];
1593 opex = 0; /* For the next iteration */
1596 switch (c) {
1597 case 01:
1598 case 02:
1599 case 03:
1600 case 04:
1601 emit_rex(data, ins);
1602 out_rawdata(data, codes, c);
1603 codes += c;
1604 break;
1606 case 05:
1607 case 06:
1608 case 07:
1609 opex = c;
1610 break;
1612 case4(010):
1613 emit_rex(data, ins);
1614 out_rawbyte(data, *codes++ + (regval(opx) & 7));
1615 break;
1617 case4(014):
1618 break;
1620 case4(020):
1621 out_imm(data, opx, 1, OUT_WRAP);
1622 break;
1624 case4(024):
1625 out_imm(data, opx, 1, OUT_UNSIGNED);
1626 break;
1628 case4(030):
1629 out_imm(data, opx, 2, OUT_WRAP);
1630 break;
1632 case4(034):
1633 if (opx->type & (BITS16 | BITS32))
1634 size = (opx->type & BITS16) ? 2 : 4;
1635 else
1636 size = (bits == 16) ? 2 : 4;
1637 out_imm(data, opx, size, OUT_WRAP);
1638 break;
1640 case4(040):
1641 out_imm(data, opx, 4, OUT_WRAP);
1642 break;
1644 case4(044):
1645 size = ins->addr_size >> 3;
1646 out_imm(data, opx, size, OUT_WRAP);
1647 break;
1649 case4(050):
1650 if (opx->segment == data->segment) {
1651 int64_t delta = opx->offset - data->offset
1652 - (data->inslen - data->insoffs);
1653 if (delta > 127 || delta < -128)
1654 nasm_error(ERR_NONFATAL, "short jump is out of range");
1656 out_reladdr(data, opx, 1);
1657 break;
1659 case4(054):
1660 out_imm(data, opx, 8, OUT_WRAP);
1661 break;
1663 case4(060):
1664 out_reladdr(data, opx, 2);
1665 break;
1667 case4(064):
1668 if (opx->type & (BITS16 | BITS32 | BITS64))
1669 size = (opx->type & BITS16) ? 2 : 4;
1670 else
1671 size = (bits == 16) ? 2 : 4;
1673 out_reladdr(data, opx, size);
1674 break;
1676 case4(070):
1677 out_reladdr(data, opx, 4);
1678 break;
1680 case4(074):
1681 if (opx->segment == NO_SEG)
1682 nasm_error(ERR_NONFATAL, "value referenced by FAR is not"
1683 " relocatable");
1684 out_segment(data, opx);
1685 break;
1687 case 0172:
1689 int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
1690 const struct operand *opy;
1692 c = *codes++;
1693 opx = &ins->oprs[c >> 3];
1694 opy = &ins->oprs[c & 7];
1695 if (!absolute_op(opy)) {
1696 nasm_error(ERR_NONFATAL,
1697 "non-absolute expression not permitted as argument %d",
1698 c & 7);
1699 } else if (opy->offset & ~mask) {
1700 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1701 "is4 argument exceeds bounds");
1703 c = opy->offset & mask;
1704 goto emit_is4;
1707 case 0173:
1708 c = *codes++;
1709 opx = &ins->oprs[c >> 4];
1710 c &= 15;
1711 goto emit_is4;
1713 case4(0174):
1714 c = 0;
1715 emit_is4:
1716 r = nasm_regvals[opx->basereg];
1717 out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
1718 break;
1720 case4(0254):
1721 if (absolute_op(opx) &&
1722 (int32_t)opx->offset != (int64_t)opx->offset) {
1723 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1724 "signed dword immediate exceeds bounds");
1726 out_imm(data, opx, 4, OUT_SIGNED);
1727 break;
1729 case4(0240):
1730 case 0250:
1731 codes += 3;
1732 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1733 EVEX_P2Z | EVEX_P2AAA, 2);
1734 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1735 bytes[0] = 0x62;
1736 /* EVEX.X can be set by either REX or EVEX for different reasons */
1737 bytes[1] = ((((ins->rex & 7) << 5) |
1738 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
1739 (ins->vex_cm & EVEX_P0MM);
1740 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1741 ((~ins->vexreg & 15) << 3) |
1742 (1 << 2) | (ins->vex_wlp & 3);
1743 bytes[3] = ins->evex_p[2];
1744 out_rawdata(data, bytes, 4);
1745 break;
1747 case4(0260):
1748 case 0270:
1749 codes += 2;
1750 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1751 ins->prefixes[PPS_VEX] == P_VEX3) {
1752 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1753 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1754 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
1755 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
1756 out_rawdata(data, bytes, 3);
1757 } else {
1758 bytes[0] = 0xc5;
1759 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
1760 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
1761 out_rawdata(data, bytes, 2);
1763 break;
1765 case 0271:
1766 case 0272:
1767 case 0273:
1768 break;
1770 case4(0274):
1772 uint64_t uv, um;
1773 int s;
1775 if (absolute_op(opx)) {
1776 if (ins->rex & REX_W)
1777 s = 64;
1778 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1779 s = 16;
1780 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1781 s = 32;
1782 else
1783 s = bits;
1785 um = (uint64_t)2 << (s-1);
1786 uv = opx->offset;
1788 if (uv > 127 && uv < (uint64_t)-128 &&
1789 (uv < um-128 || uv > um-1)) {
1790 /* If this wasn't explicitly byte-sized, warn as though we
1791 * had fallen through to the imm16/32/64 case.
1793 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1794 "%s value exceeds bounds",
1795 (opx->type & BITS8) ? "signed byte" :
1796 s == 16 ? "word" :
1797 s == 32 ? "dword" :
1798 "signed dword");
1801 /* Output as a raw byte to avoid byte overflow check */
1802 out_rawbyte(data, (uint8_t)uv);
1803 } else {
1804 out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
1806 break;
1809 case4(0300):
1810 break;
1812 case 0310:
1813 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
1814 out_rawbyte(data, 0x67);
1815 break;
1817 case 0311:
1818 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
1819 out_rawbyte(data, 0x67);
1820 break;
1822 case 0312:
1823 break;
1825 case 0313:
1826 ins->rex = 0;
1827 break;
1829 case4(0314):
1830 break;
1832 case 0320:
1833 case 0321:
1834 break;
1836 case 0322:
1837 case 0323:
1838 break;
1840 case 0324:
1841 ins->rex |= REX_W;
1842 break;
1844 case 0325:
1845 break;
1847 case 0326:
1848 break;
1850 case 0330:
1851 out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
1852 break;
1854 case 0331:
1855 break;
1857 case 0332:
1858 case 0333:
1859 out_rawbyte(data, c - 0332 + 0xF2);
1860 break;
1862 case 0334:
1863 if (ins->rex & REX_R)
1864 out_rawbyte(data, 0xF0);
1865 ins->rex &= ~(REX_L|REX_R);
1866 break;
1868 case 0335:
1869 break;
1871 case 0336:
1872 case 0337:
1873 break;
1875 case 0340:
1876 if (ins->oprs[0].segment != NO_SEG)
1877 nasm_panic("non-constant BSS size in pass two");
1879 out_reserve(data, ins->oprs[0].offset);
1880 break;
1882 case 0341:
1883 break;
1885 case 0360:
1886 break;
1888 case 0361:
1889 out_rawbyte(data, 0x66);
1890 break;
1892 case 0364:
1893 case 0365:
1894 break;
1896 case 0366:
1897 case 0367:
1898 out_rawbyte(data, c - 0366 + 0x66);
1899 break;
1901 case3(0370):
1902 break;
1904 case 0373:
1905 out_rawbyte(data, bits == 16 ? 3 : 5);
1906 break;
1908 case 0374:
1909 eat = EA_XMMVSIB;
1910 break;
1912 case 0375:
1913 eat = EA_YMMVSIB;
1914 break;
1916 case 0376:
1917 eat = EA_ZMMVSIB;
1918 break;
1920 case4(0100):
1921 case4(0110):
1922 case4(0120):
1923 case4(0130):
1924 case4(0200):
1925 case4(0204):
1926 case4(0210):
1927 case4(0214):
1928 case4(0220):
1929 case4(0224):
1930 case4(0230):
1931 case4(0234):
1933 ea ea_data;
1934 int rfield;
1935 opflags_t rflags;
1936 uint8_t *p;
1937 struct operand *opy = &ins->oprs[op2];
1939 if (c <= 0177) {
1940 /* pick rfield from operand b (opx) */
1941 rflags = regflag(opx);
1942 rfield = nasm_regvals[opx->basereg];
1943 } else {
1944 /* rfield is constant */
1945 rflags = 0;
1946 rfield = c & 7;
1949 if (process_ea(opy, &ea_data, bits,
1950 rfield, rflags, ins, &errmsg) != eat)
1951 nasm_error(ERR_NONFATAL, "%s", errmsg);
1953 p = bytes;
1954 *p++ = ea_data.modrm;
1955 if (ea_data.sib_present)
1956 *p++ = ea_data.sib;
1957 out_rawdata(data, bytes, p - bytes);
1960 * Make sure the address gets the right offset in case
1961 * the line breaks in the .lst file (BR 1197827)
1964 if (ea_data.bytes) {
1965 /* use compressed displacement, if available */
1966 if (ea_data.disp8) {
1967 out_rawbyte(data, ea_data.disp8);
1968 } else if (ea_data.rip) {
1969 out_reladdr(data, opy, ea_data.bytes);
1970 } else {
1971 int asize = ins->addr_size >> 3;
1973 if (overflow_general(opy->offset, asize) ||
1974 signed_bits(opy->offset, ins->addr_size) !=
1975 signed_bits(opy->offset, ea_data.bytes << 3))
1976 warn_overflow(ea_data.bytes);
1978 out_imm(data, opy, ea_data.bytes,
1979 (asize > ea_data.bytes)
1980 ? OUT_SIGNED : OUT_WRAP);
1984 break;
1986 default:
1987 nasm_panic("internal instruction table corrupt"
1988 ": instruction code \\%o (0x%02X) given", c, c);
1989 break;
1994 static opflags_t regflag(const operand * o)
1996 if (!is_register(o->basereg))
1997 nasm_panic("invalid operand passed to regflag()");
1998 return nasm_reg_flags[o->basereg];
2001 static int32_t regval(const operand * o)
2003 if (!is_register(o->basereg))
2004 nasm_panic("invalid operand passed to regval()");
2005 return nasm_regvals[o->basereg];
2008 static int op_rexflags(const operand * o, int mask)
2010 opflags_t flags;
2011 int val;
2013 if (!is_register(o->basereg))
2014 nasm_panic("invalid operand passed to op_rexflags()");
2016 flags = nasm_reg_flags[o->basereg];
2017 val = nasm_regvals[o->basereg];
2019 return rexflags(val, flags, mask);
2022 static int rexflags(int val, opflags_t flags, int mask)
2024 int rex = 0;
2026 if (val >= 0 && (val & 8))
2027 rex |= REX_B|REX_X|REX_R;
2028 if (flags & BITS64)
2029 rex |= REX_W;
2030 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
2031 rex |= REX_H;
2032 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
2033 rex |= REX_P;
2035 return rex & mask;
2038 static int evexflags(int val, decoflags_t deco,
2039 int mask, uint8_t byte)
2041 int evex = 0;
2043 switch (byte) {
2044 case 0:
2045 if (val >= 0 && (val & 16))
2046 evex |= (EVEX_P0RP | EVEX_P0X);
2047 break;
2048 case 2:
2049 if (val >= 0 && (val & 16))
2050 evex |= EVEX_P2VP;
2051 if (deco & Z)
2052 evex |= EVEX_P2Z;
2053 if (deco & OPMASK_MASK)
2054 evex |= deco & EVEX_P2AAA;
2055 break;
2057 return evex & mask;
2060 static int op_evexflags(const operand * o, int mask, uint8_t byte)
2062 int val;
2064 val = nasm_regvals[o->basereg];
2066 return evexflags(val, o->decoflags, mask, byte);
2069 static enum match_result find_match(const struct itemplate **tempp,
2070 insn *instruction,
2071 int32_t segment, int64_t offset, int bits)
2073 const struct itemplate *temp;
2074 enum match_result m, merr;
2075 opflags_t xsizeflags[MAX_OPERANDS];
2076 bool opsizemissing = false;
2077 int8_t broadcast = instruction->evex_brerop;
2078 int i;
2080 /* broadcasting uses a different data element size */
2081 for (i = 0; i < instruction->operands; i++)
2082 if (i == broadcast)
2083 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
2084 else
2085 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
2087 merr = MERR_INVALOP;
2089 for (temp = nasm_instructions[instruction->opcode];
2090 temp->opcode != I_none; temp++) {
2091 m = matches(temp, instruction, bits);
2092 if (m == MOK_JUMP) {
2093 if (jmp_match(segment, offset, bits, instruction, temp))
2094 m = MOK_GOOD;
2095 else
2096 m = MERR_INVALOP;
2097 } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
2099 * Missing operand size and a candidate for fuzzy matching...
2101 for (i = 0; i < temp->operands; i++)
2102 if (i == broadcast)
2103 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
2104 else
2105 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
2106 opsizemissing = true;
2108 if (m > merr)
2109 merr = m;
2110 if (merr == MOK_GOOD)
2111 goto done;
2114 /* No match, but see if we can get a fuzzy operand size match... */
2115 if (!opsizemissing)
2116 goto done;
2118 for (i = 0; i < instruction->operands; i++) {
2120 * We ignore extrinsic operand sizes on registers, so we should
2121 * never try to fuzzy-match on them. This also resolves the case
2122 * when we have e.g. "xmmrm128" in two different positions.
2124 if (is_class(REGISTER, instruction->oprs[i].type))
2125 continue;
2127 /* This tests if xsizeflags[i] has more than one bit set */
2128 if ((xsizeflags[i] & (xsizeflags[i]-1)))
2129 goto done; /* No luck */
2131 if (i == broadcast) {
2132 instruction->oprs[i].decoflags |= xsizeflags[i];
2133 instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
2134 BITS32 : BITS64);
2135 } else {
2136 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
2140 /* Try matching again... */
2141 for (temp = nasm_instructions[instruction->opcode];
2142 temp->opcode != I_none; temp++) {
2143 m = matches(temp, instruction, bits);
2144 if (m == MOK_JUMP) {
2145 if (jmp_match(segment, offset, bits, instruction, temp))
2146 m = MOK_GOOD;
2147 else
2148 m = MERR_INVALOP;
2150 if (m > merr)
2151 merr = m;
2152 if (merr == MOK_GOOD)
2153 goto done;
2156 done:
2157 *tempp = temp;
2158 return merr;
2161 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2163 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
2164 uint8_t brcast_num;
2166 if (brsize > BITS64)
2167 nasm_error(ERR_FATAL,
2168 "size of broadcasting element is greater than 64 bits");
2171 * The shift term is to take care of the extra BITS80 inserted
2172 * between BITS64 and BITS128.
2174 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
2175 >> (opsize > (BITS64 >> SIZE_SHIFT));
2177 return brcast_num;
2180 static enum match_result matches(const struct itemplate *itemp,
2181 insn *instruction, int bits)
2183 opflags_t size[MAX_OPERANDS], asize;
2184 bool opsizemissing = false;
2185 int i, oprs;
2188 * Check the opcode
2190 if (itemp->opcode != instruction->opcode)
2191 return MERR_INVALOP;
2194 * Count the operands
2196 if (itemp->operands != instruction->operands)
2197 return MERR_INVALOP;
2200 * Is it legal?
2202 if (!(optimizing.level > 0) && itemp_has(itemp, IF_OPT))
2203 return MERR_INVALOP;
2206 * {evex} available?
2208 switch (instruction->prefixes[PPS_VEX]) {
2209 case P_EVEX:
2210 if (!itemp_has(itemp, IF_EVEX))
2211 return MERR_ENCMISMATCH;
2212 break;
2213 case P_VEX3:
2214 case P_VEX2:
2215 if (!itemp_has(itemp, IF_VEX))
2216 return MERR_ENCMISMATCH;
2217 break;
2218 default:
2219 break;
2223 * Check that no spurious colons or TOs are present
2225 for (i = 0; i < itemp->operands; i++)
2226 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
2227 return MERR_INVALOP;
2230 * Process size flags
2232 switch (itemp_smask(itemp)) {
2233 case IF_GENBIT(IF_SB):
2234 asize = BITS8;
2235 break;
2236 case IF_GENBIT(IF_SW):
2237 asize = BITS16;
2238 break;
2239 case IF_GENBIT(IF_SD):
2240 asize = BITS32;
2241 break;
2242 case IF_GENBIT(IF_SQ):
2243 asize = BITS64;
2244 break;
2245 case IF_GENBIT(IF_SO):
2246 asize = BITS128;
2247 break;
2248 case IF_GENBIT(IF_SY):
2249 asize = BITS256;
2250 break;
2251 case IF_GENBIT(IF_SZ):
2252 asize = BITS512;
2253 break;
2254 case IF_GENBIT(IF_SIZE):
2255 switch (bits) {
2256 case 16:
2257 asize = BITS16;
2258 break;
2259 case 32:
2260 asize = BITS32;
2261 break;
2262 case 64:
2263 asize = BITS64;
2264 break;
2265 default:
2266 asize = 0;
2267 break;
2269 break;
2270 default:
2271 asize = 0;
2272 break;
2275 if (itemp_armask(itemp)) {
2276 /* S- flags only apply to a specific operand */
2277 i = itemp_arg(itemp);
2278 memset(size, 0, sizeof size);
2279 size[i] = asize;
2280 } else {
2281 /* S- flags apply to all operands */
2282 for (i = 0; i < MAX_OPERANDS; i++)
2283 size[i] = asize;
2287 * Check that the operand flags all match up,
2288 * it's a bit tricky so lets be verbose:
2290 * 1) Find out the size of operand. If instruction
2291 * doesn't have one specified -- we're trying to
2292 * guess it either from template (IF_S* flag) or
2293 * from code bits.
2295 * 2) If template operand do not match the instruction OR
2296 * template has an operand size specified AND this size differ
2297 * from which instruction has (perhaps we got it from code bits)
2298 * we are:
2299 * a) Check that only size of instruction and operand is differ
2300 * other characteristics do match
2301 * b) Perhaps it's a register specified in instruction so
2302 * for such a case we just mark that operand as "size
2303 * missing" and this will turn on fuzzy operand size
2304 * logic facility (handled by a caller)
2306 for (i = 0; i < itemp->operands; i++) {
2307 opflags_t type = instruction->oprs[i].type;
2308 decoflags_t deco = instruction->oprs[i].decoflags;
2309 decoflags_t ideco = itemp->deco[i];
2310 bool is_broadcast = deco & BRDCAST_MASK;
2311 uint8_t brcast_num = 0;
2312 opflags_t template_opsize, insn_opsize;
2314 if (!(type & SIZE_MASK))
2315 type |= size[i];
2317 insn_opsize = type & SIZE_MASK;
2318 if (!is_broadcast) {
2319 template_opsize = itemp->opd[i] & SIZE_MASK;
2320 } else {
2321 decoflags_t deco_brsize = ideco & BRSIZE_MASK;
2323 if (~ideco & BRDCAST_MASK)
2324 return MERR_BRNOTHERE;
2327 * when broadcasting, the element size depends on
2328 * the instruction type. decorator flag should match.
2330 if (deco_brsize) {
2331 template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
2332 /* calculate the proper number : {1to<brcast_num>} */
2333 brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
2334 } else {
2335 template_opsize = 0;
2339 if (~ideco & deco & OPMASK_MASK)
2340 return MERR_MASKNOTHERE;
2342 if (~ideco & deco & (Z_MASK|STATICRND_MASK|SAE_MASK))
2343 return MERR_DECONOTHERE;
2345 if (itemp->opd[i] & ~type & ~(SIZE_MASK|REGSET_MASK))
2346 return MERR_INVALOP;
2348 if (~itemp->opd[i] & type & REGSET_MASK)
2349 return (itemp->opd[i] & REGSET_MASK)
2350 ? MERR_REGSETSIZE : MERR_REGSET;
2352 if (template_opsize) {
2353 if (template_opsize != insn_opsize) {
2354 if (insn_opsize) {
2355 return MERR_INVALOP;
2356 } else if (!is_class(REGISTER, type)) {
2358 * Note: we don't honor extrinsic operand sizes for registers,
2359 * so "missing operand size" for a register should be
2360 * considered a wildcard match rather than an error.
2362 opsizemissing = true;
2364 } else if (is_broadcast &&
2365 (brcast_num !=
2366 (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
2368 * broadcasting opsize matches but the number of repeated memory
2369 * element does not match.
2370 * if 64b double precision float is broadcasted to ymm (256b),
2371 * broadcasting decorator must be {1to4}.
2373 return MERR_BRNUMMISMATCH;
2378 if (opsizemissing)
2379 return MERR_OPSIZEMISSING;
2382 * Check operand sizes
2384 if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
2385 oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
2386 for (i = 0; i < oprs; i++) {
2387 asize = itemp->opd[i] & SIZE_MASK;
2388 if (asize) {
2389 for (i = 0; i < oprs; i++)
2390 size[i] = asize;
2391 break;
2394 } else {
2395 oprs = itemp->operands;
2398 for (i = 0; i < itemp->operands; i++) {
2399 if (!(itemp->opd[i] & SIZE_MASK) &&
2400 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
2401 return MERR_OPSIZEMISMATCH;
2405 * Check template is okay at the set cpu level
2407 if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
2408 return MERR_BADCPU;
2411 * Verify the appropriate long mode flag.
2413 if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
2414 return MERR_BADMODE;
2417 * If we have a HLE prefix, look for the NOHLE flag
2419 if (itemp_has(itemp, IF_NOHLE) &&
2420 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2421 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2422 return MERR_BADHLE;
2425 * Check if special handling needed for Jumps
2427 if ((itemp->code[0] & ~1) == 0370)
2428 return MOK_JUMP;
2431 * Check if BND prefix is allowed.
2432 * Other 0xF2 (REPNE/REPNZ) prefix is prohibited.
2434 if (!itemp_has(itemp, IF_BND) &&
2435 (has_prefix(instruction, PPS_REP, P_BND) ||
2436 has_prefix(instruction, PPS_REP, P_NOBND)))
2437 return MERR_BADBND;
2438 else if (itemp_has(itemp, IF_BND) &&
2439 (has_prefix(instruction, PPS_REP, P_REPNE) ||
2440 has_prefix(instruction, PPS_REP, P_REPNZ)))
2441 return MERR_BADREPNE;
2443 return MOK_GOOD;
2447 * Check if ModR/M.mod should/can be 01.
2448 * - EAF_BYTEOFFS is set
2449 * - offset can fit in a byte when EVEX is not used
2450 * - offset can be compressed when EVEX is used
2452 #define IS_MOD_01() (!(input->eaflags & EAF_WORDOFFS) && \
2453 (ins->rex & REX_EV ? seg == NO_SEG && !forw_ref && \
2454 is_disp8n(input, ins, &output->disp8) : \
2455 input->eaflags & EAF_BYTEOFFS || (o >= -128 && \
2456 o <= 127 && seg == NO_SEG && !forw_ref)))
/*
 * process_ea: generate the effective-address part of an instruction:
 * the ModRM byte, an optional SIB byte, and the displacement size.
 *
 * input   - the operand to encode (direct register or memory reference)
 * output  - receives modrm, sib, sib_present, bytes (displacement size),
 *           rip, disp8 and total size
 * bits    - current assembly mode (16, 32 or 64)
 * rfield  - value to place in the reg field of the ModRM byte
 * rflags  - register flags corresponding to rfield
 * ins     - instruction being assembled; its REX and EVEX prefix bits
 *           are or-ed into as a side effect
 * errmsg  - receives a static error message string on failure
 *
 * Returns the EA type generated (EA_INVALID on error); the same value
 * is also stored in output->type.
 */
static enum ea_type process_ea(operand *input, ea *output, int bits,
                               int rfield, opflags_t rflags, insn *ins,
                               const char **errmsg)
{
    bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
    int addrbits = ins->addr_size;
    int eaflags = input->eaflags;

    *errmsg = "invalid effective address"; /* Default error message */

    output->type = EA_SCALAR;
    output->rip = false;
    output->disp8 = 0;

    /* REX flags for the rfield operand */
    output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
    /* EVEX.R' flag for the REG operand */
    ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);

    if (is_class(REGISTER, input->type)) {
        /*
         * It's a direct register.
         */
        if (!is_register(input->basereg))
            goto err;

        if (!is_reg_class(REG_EA, input->basereg))
            goto err;

        /* broadcasting is not available with a direct register operand. */
        if (input->decoflags & BRDCAST_MASK) {
            *errmsg = "broadcast not allowed with register operand";
            goto err;
        }

        output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
        ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
        output->sib_present = false;    /* no SIB necessary */
        output->bytes = 0;              /* no offset necessary either */
        /* mod = 3 selects register-direct addressing */
        output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
    } else {
        /*
         * It's a memory reference.
         */

        /* Embedded rounding or SAE is not available with a mem ref operand. */
        if (input->decoflags & (ER | SAE)) {
            *errmsg = "embedded rounding is available only with "
                "register-register operations";
            goto err;
        }

        if (input->basereg == -1 &&
            (input->indexreg == -1 || input->scale == 0)) {
            /*
             * It's a pure offset.
             */
            if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
                if (input->segment == NO_SEG ||
                    (input->opflags & OPFLAG_RELATIVE)) {
                    /* an absolute address has no sensible RIP base */
                    nasm_error(ERR_WARNING | ERR_PASS2,
                               "absolute address can not be RIP-relative");
                    input->type &= ~IP_REL;
                    input->type |= MEMORY;
                }
            }

            if (bits == 64 &&
                !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
                *errmsg = "RIP-relative addressing is prohibited for MIB";
                goto err;
            }

            /* an absolute address always uses the full displacement size */
            if (eaflags & EAF_BYTEOFFS ||
                (eaflags & EAF_WORDOFFS &&
                 input->disp_size != (addrbits != 16 ? 32 : 16))) {
                nasm_error(ERR_WARNING | ERR_PASS1,
                           "displacement size ignored on absolute address");
            }

            if (bits == 64 && (~input->type & IP_REL)) {
                /*
                 * 64-bit non-RIP absolute: mod=0, rm=4 (SIB follows),
                 * SIB base=5 (disp32, no base), index=4 (none).
                 */
                output->sib_present = true;
                output->sib = GEN_SIB(0, 4, 5);
                output->bytes = 4;
                output->modrm = GEN_MODRM(0, rfield, 4);
                output->rip = false;
            } else {
                /* disp-only form: rm=5 (32-bit) or rm=6 (16-bit) */
                output->sib_present = false;
                output->bytes = (addrbits != 16 ? 4 : 2);
                output->modrm = GEN_MODRM(0, rfield,
                                          (addrbits != 16 ? 5 : 6));
                output->rip = bits == 64;
            }
        } else {
            /*
             * It's an indirection.
             */
            int i = input->indexreg, b = input->basereg, s = input->scale;
            int32_t seg = input->segment;
            int hb = input->hintbase, ht = input->hinttype;
            int t, it, bt;              /* register numbers */
            opflags_t x, ix, bx;        /* register flags */

            if (s == 0)
                i = -1;         /* make this easy, at least */

            /* resolve index register to number + flags (-1/0 if absent) */
            if (is_register(i)) {
                it = nasm_regvals[i];
                ix = nasm_reg_flags[i];
            } else {
                it = -1;
                ix = 0;
            }

            /* resolve base register to number + flags (-1/0 if absent) */
            if (is_register(b)) {
                bt = nasm_regvals[b];
                bx = nasm_reg_flags[b];
            } else {
                bt = -1;
                bx = 0;
            }

            /* if either one are a vector register... */
            if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
                opflags_t sok = BITS32 | BITS64;
                int32_t o = input->offset;
                int mod, scale, index, base;

                /*
                 * For a vector SIB, one has to be a vector and the other,
                 * if present, a GPR. The vector must be the index operand.
                 */
                if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
                    if (s == 0)
                        s = 1;
                    else if (s != 1)
                        goto err;

                    t = bt, bt = it, it = t;
                    x = bx, bx = ix, ix = x;
                }

                if (bt != -1) {
                    if (REG_GPR & ~bx)
                        goto err;
                    if (!(REG64 & ~bx) || !(REG32 & ~bx))
                        sok &= bx;      /* restrict to the base's size */
                    else
                        goto err;
                }

                /*
                 * While we're here, ensure the user didn't specify
                 * WORD or QWORD
                 */
                if (input->disp_size == 16 || input->disp_size == 64)
                    goto err;

                if (addrbits == 16 ||
                    (addrbits == 32 && !(sok & BITS32)) ||
                    (addrbits == 64 && !(sok & BITS64)))
                    goto err;

                /* widest vector class of the index decides the VSIB type */
                output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
                                : ((ix & YMMREG & ~REG_EA)
                                   ? EA_YMMVSIB : EA_XMMVSIB));

                output->rex |= rexflags(it, ix, REX_X);
                output->rex |= rexflags(bt, bx, REX_B);
                ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);

                index = it & 7; /* it is known to be != -1 */

                /* SIB scale field is log2 of the scale factor */
                switch (s) {
                case 1:
                    scale = 0;
                    break;
                case 2:
                    scale = 1;
                    break;
                case 4:
                    scale = 2;
                    break;
                case 8:
                    scale = 3;
                    break;
                default:        /* then what the smeg is it? */
                    goto err;   /* panic */
                }

                if (bt == -1) {
                    base = 5;   /* no base: disp32 form */
                    mod = 0;
                } else {
                    base = (bt & 7);
                    /* mod: 0 = no disp, 1 = disp8, 2 = full disp */
                    if (base != REG_NUM_EBP && o == 0 &&
                        seg == NO_SEG && !forw_ref &&
                        !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                        mod = 0;
                    else if (IS_MOD_01())
                        mod = 1;
                    else
                        mod = 2;
                }

                output->sib_present = true;
                output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                output->modrm = GEN_MODRM(mod, rfield, 4);
                output->sib = GEN_SIB(scale, index, base);
            } else if ((ix|bx) & (BITS32|BITS64)) {
                /*
                 * it must be a 32/64-bit memory reference. Firstly we have
                 * to check that all registers involved are type E/Rxx.
                 */
                opflags_t sok = BITS32 | BITS64;
                int32_t o = input->offset;

                if (it != -1) {
                    if (!(REG64 & ~ix) || !(REG32 & ~ix))
                        sok &= ix;
                    else
                        goto err;
                }

                if (bt != -1) {
                    if (REG_GPR & ~bx)
                        goto err;       /* Invalid register */
                    if (~sok & bx & SIZE_MASK)
                        goto err;       /* Invalid size */
                    sok &= bx;
                }

                /*
                 * While we're here, ensure the user didn't specify
                 * WORD or QWORD
                 */
                if (input->disp_size == 16 || input->disp_size == 64)
                    goto err;

                if (addrbits == 16 ||
                    (addrbits == 32 && !(sok & BITS32)) ||
                    (addrbits == 64 && !(sok & BITS64)))
                    goto err;

                /* now reorganize base/index */
                if (s == 1 && bt != it && bt != -1 && it != -1 &&
                    ((hb == b && ht == EAH_NOTBASE) ||
                     (hb == i && ht == EAH_MAKEBASE))) {
                    /* swap if hints say so */
                    t = bt, bt = it, it = t;
                    x = bx, bx = ix, ix = x;
                }

                if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
                    /* make single reg base, unless hint */
                    bt = it, bx = ix, it = -1, ix = 0;
                }
                if (eaflags & EAF_MIB) {
                    /* only for mib operands */
                    if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
                        /*
                         * make a single reg index [reg*1].
                         * gas uses this form for an explicit index register.
                         */
                        it = bt, ix = bx, bt = -1, bx = 0, s = 1;
                    }
                    if ((ht == EAH_SUMMED) && bt == -1) {
                        /* separate once summed index into [base, index] */
                        bt = it, bx = ix, s--;
                    }
                } else {
                    if (((s == 2 && it != REG_NUM_ESP &&
                          (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
                         s == 3 || s == 5 || s == 9) && bt == -1) {
                        /* convert 3*EAX to EAX+2*EAX */
                        bt = it, bx = ix, s--;
                    }
                    if (it == -1 && (bt & 7) != REG_NUM_ESP &&
                        (eaflags & EAF_TIMESTWO) &&
                        (hb == b && ht == EAH_NOTBASE)) {
                        /*
                         * convert [NOSPLIT EAX*1]
                         * to sib format with 0x0 displacement - [EAX*1+0].
                         */
                        it = bt, ix = bx, bt = -1, bx = 0, s = 1;
                    }
                }
                if (s == 1 && it == REG_NUM_ESP) {
                    /* swap ESP into base if scale is 1 */
                    t = it, it = bt, bt = t;
                    x = ix, ix = bx, bx = x;
                }
                /* ESP can never be an index; scale must be 1/2/4/8 */
                if (it == REG_NUM_ESP ||
                    (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
                    goto err;   /* wrong, for various reasons */

                output->rex |= rexflags(it, ix, REX_X);
                output->rex |= rexflags(bt, bx, REX_B);

                if (it == -1 && (bt & 7) != REG_NUM_ESP) {
                    /* no SIB needed */
                    int mod, rm;

                    if (bt == -1) {
                        rm = 5; /* disp32-only form */
                        mod = 0;
                    } else {
                        rm = (bt & 7);
                        /* mod: 0 = no disp, 1 = disp8, 2 = full disp */
                        if (rm != REG_NUM_EBP && o == 0 &&
                            seg == NO_SEG && !forw_ref &&
                            !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                            mod = 0;
                        else if (IS_MOD_01())
                            mod = 1;
                        else
                            mod = 2;
                    }

                    output->sib_present = false;
                    output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                    output->modrm = GEN_MODRM(mod, rfield, rm);
                } else {
                    /* we need a SIB */
                    int mod, scale, index, base;

                    if (it == -1)
                        index = 4, s = 1;       /* index 4 = no index */
                    else
                        index = (it & 7);

                    /* SIB scale field is log2 of the scale factor */
                    switch (s) {
                    case 1:
                        scale = 0;
                        break;
                    case 2:
                        scale = 1;
                        break;
                    case 4:
                        scale = 2;
                        break;
                    case 8:
                        scale = 3;
                        break;
                    default:    /* then what the smeg is it? */
                        goto err;       /* panic */
                    }

                    if (bt == -1) {
                        base = 5;       /* no base: disp32 form */
                        mod = 0;
                    } else {
                        base = (bt & 7);
                        /* mod: 0 = no disp, 1 = disp8, 2 = full disp */
                        if (base != REG_NUM_EBP && o == 0 &&
                            seg == NO_SEG && !forw_ref &&
                            !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                            mod = 0;
                        else if (IS_MOD_01())
                            mod = 1;
                        else
                            mod = 2;
                    }

                    output->sib_present = true;
                    output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                    output->modrm = GEN_MODRM(mod, rfield, 4);
                    output->sib = GEN_SIB(scale, index, base);
                }
            } else {            /* it's 16-bit */
                int mod, rm;
                int16_t o = input->offset;

                /* check for 64-bit long mode */
                if (addrbits == 64)
                    goto err;

                /* check all registers are BX, BP, SI or DI */
                if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
                    (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
                    goto err;

                /* ensure the user didn't specify DWORD/QWORD */
                if (input->disp_size == 32 || input->disp_size == 64)
                    goto err;

                if (s != 1 && i != -1)
                    goto err;   /* no can do, in 16-bit EA */
                if (b == -1 && i != -1) {
                    int tmp = b;
                    b = i;
                    i = tmp;
                }               /* swap */
                if ((b == R_SI || b == R_DI) && i != -1) {
                    int tmp = b;
                    b = i;
                    i = tmp;
                }
                /* have BX/BP as base, SI/DI index */
                if (b == i)
                    goto err;   /* shouldn't ever happen, in theory */
                if (i != -1 && b != -1 &&
                    (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
                    goto err;   /* invalid combinations */
                if (b == -1)    /* pure offset: handled above */
                    goto err;   /* so if it gets to here, panic! */

                /* map the surviving base/index pair to the 16-bit rm codes */
                rm = -1;
                if (i != -1)
                    switch (i * 256 + b) {
                    case R_SI * 256 + R_BX:
                        rm = 0;
                        break;
                    case R_DI * 256 + R_BX:
                        rm = 1;
                        break;
                    case R_SI * 256 + R_BP:
                        rm = 2;
                        break;
                    case R_DI * 256 + R_BP:
                        rm = 3;
                        break;
                } else
                    switch (b) {
                    case R_SI:
                        rm = 4;
                        break;
                    case R_DI:
                        rm = 5;
                        break;
                    case R_BP:
                        rm = 6;
                        break;
                    case R_BX:
                        rm = 7;
                        break;
                    }
                if (rm == -1)   /* can't happen, in theory */
                    goto err;   /* so panic if it does */

                /* rm 6 with mod 0 means disp16, so [bp] needs mod 1 */
                if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
                    !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                    mod = 0;
                else if (IS_MOD_01())
                    mod = 1;
                else
                    mod = 2;

                output->sib_present = false;    /* no SIB - it's 16-bit */
                output->bytes = mod;    /* bytes of offset needed */
                output->modrm = GEN_MODRM(mod, rfield, rm);
            }
        }
    }

    /* total EA size: ModRM byte + optional SIB byte + displacement */
    output->size = 1 + output->sib_present + output->bytes;
    return output->type;

err:
    return output->type = EA_INVALID;
}
2918 static void add_asp(insn *ins, int addrbits)
2920 int j, valid;
2921 int defdisp;
2923 valid = (addrbits == 64) ? 64|32 : 32|16;
2925 switch (ins->prefixes[PPS_ASIZE]) {
2926 case P_A16:
2927 valid &= 16;
2928 break;
2929 case P_A32:
2930 valid &= 32;
2931 break;
2932 case P_A64:
2933 valid &= 64;
2934 break;
2935 case P_ASP:
2936 valid &= (addrbits == 32) ? 16 : 32;
2937 break;
2938 default:
2939 break;
2942 for (j = 0; j < ins->operands; j++) {
2943 if (is_class(MEMORY, ins->oprs[j].type)) {
2944 opflags_t i, b;
2946 /* Verify as Register */
2947 if (!is_register(ins->oprs[j].indexreg))
2948 i = 0;
2949 else
2950 i = nasm_reg_flags[ins->oprs[j].indexreg];
2952 /* Verify as Register */
2953 if (!is_register(ins->oprs[j].basereg))
2954 b = 0;
2955 else
2956 b = nasm_reg_flags[ins->oprs[j].basereg];
2958 if (ins->oprs[j].scale == 0)
2959 i = 0;
2961 if (!i && !b) {
2962 int ds = ins->oprs[j].disp_size;
2963 if ((addrbits != 64 && ds > 8) ||
2964 (addrbits == 64 && ds == 16))
2965 valid &= ds;
2966 } else {
2967 if (!(REG16 & ~b))
2968 valid &= 16;
2969 if (!(REG32 & ~b))
2970 valid &= 32;
2971 if (!(REG64 & ~b))
2972 valid &= 64;
2974 if (!(REG16 & ~i))
2975 valid &= 16;
2976 if (!(REG32 & ~i))
2977 valid &= 32;
2978 if (!(REG64 & ~i))
2979 valid &= 64;
2984 if (valid & addrbits) {
2985 ins->addr_size = addrbits;
2986 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2987 /* Add an address size prefix */
2988 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;;
2989 ins->addr_size = (addrbits == 32) ? 16 : 32;
2990 } else {
2991 /* Impossible... */
2992 nasm_error(ERR_NONFATAL, "impossible combination of address sizes");
2993 ins->addr_size = addrbits; /* Error recovery */
2996 defdisp = ins->addr_size == 16 ? 16 : 32;
2998 for (j = 0; j < ins->operands; j++) {
2999 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
3000 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
3002 * mem_offs sizes must match the address size; if not,
3003 * strip the MEM_OFFS bit and match only EA instructions
3005 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);