1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2017 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * Bytecode specification
38 * ----------------------
41 * Codes Mnemonic Explanation
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
98 * [p0] pp = 0 for no prefix
99 * [66] pp = 1 for legacy prefix 66
100 * [f3] pp = 2
101 * [f2] pp = 3
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
119 * [w0] ww = 0 for W = 0
120 * [w1] ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
148 * disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes a YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes a ZMM VSIB memory EA
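/*
 * Illustration only (hypothetical template, not taken from insns.dat):
 * using the codes above, an "F7 /2" form such as NOT r/m32 could be
 * described by the byte string below.  \321 requests 32-bit operand
 * size (0x66 emitted only in 16-bit mode), \1 says one literal opcode
 * byte (0xF7) follows, and \202 emits a ModRM byte computed from the
 * EA in operand 0 with the spare field fixed to 2.  The real templates
 * are generated from insns.dat by insns.pl.
 */
#if 0
static const uint8_t example_not_rm32[] = { 0321, 01, 0xF7, 0202, 0 };
#endif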
178 #include "compiler.h"
180 #include <stdio.h>
181 #include <string.h>
182 #include <stdlib.h>
184 #include "nasm.h"
185 #include "nasmlib.h"
186 #include "error.h"
187 #include "assemble.h"
188 #include "insns.h"
189 #include "tables.h"
190 #include "disp8.h"
191 #include "listing.h"
193 enum match_result {
195 * Matching errors. These should be sorted so that more specific
196 * errors come later in the sequence.
198 MERR_INVALOP,
199 MERR_OPSIZEMISSING,
200 MERR_OPSIZEMISMATCH,
201 MERR_BRNOTHERE,
202 MERR_BRNUMMISMATCH,
203 MERR_MASKNOTHERE,
204 MERR_DECONOTHERE,
205 MERR_BADCPU,
206 MERR_BADMODE,
207 MERR_BADHLE,
208 MERR_ENCMISMATCH,
209 MERR_BADBND,
210 MERR_BADREPNE,
212 * Matching success; the conditional ones first
214 MOK_JUMP, /* Matching OK but needs jmp_match() */
215 MOK_GOOD /* Matching unconditionally OK */
218 typedef struct {
219 enum ea_type type; /* what kind of EA is this? */
220 int sib_present; /* is a SIB byte necessary? */
221 int bytes; /* # of bytes of offset needed */
222 int size; /* lazy - this is sib+bytes+1 */
223 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
224 int8_t disp8; /* compressed displacement for EVEX */
225 } ea;
227 #define GEN_SIB(scale, index, base) \
228 (((scale) << 6) | ((index) << 3) | ((base)))
230 #define GEN_MODRM(mod, reg, rm) \
231 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
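/*
 * Illustration: GEN_MODRM(3, 2, 1) == 0xD1, i.e. mod=11b, spare field 2,
 * rm=001b (ECX as a register operand); GEN_SIB(2, 1, 0) == 0x88, the SIB
 * byte for a [eax+ecx*4] effective address.
 */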
233 static int64_t calcsize(int32_t, int64_t, int, insn *,
234 const struct itemplate *);
235 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
236 static void gencode(struct out_data *data, insn *ins);
237 static enum match_result find_match(const struct itemplate **tempp,
238 insn *instruction,
239 int32_t segment, int64_t offset, int bits);
240 static enum match_result matches(const struct itemplate *, insn *, int bits);
241 static opflags_t regflag(const operand *);
242 static int32_t regval(const operand *);
243 static int rexflags(int, opflags_t, int);
244 static int op_rexflags(const operand *, int);
245 static int op_evexflags(const operand *, int, uint8_t);
246 static void add_asp(insn *, int);
248 static enum ea_type process_ea(operand *, ea *, int, int,
249 opflags_t, insn *, const char **);
251 static inline bool absolute_op(const struct operand *o)
253 return o->segment == NO_SEG && o->wrt == NO_SEG &&
254 !(o->opflags & OPFLAG_RELATIVE);
257 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
259 return ins->prefixes[pos] == prefix;
262 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
264 if (ins->prefixes[pos])
265 nasm_error(ERR_NONFATAL, "invalid %s prefix",
266 prefix_name(ins->prefixes[pos]));
269 static const char *size_name(int size)
271 switch (size) {
272 case 1:
273 return "byte";
274 case 2:
275 return "word";
276 case 4:
277 return "dword";
278 case 8:
279 return "qword";
280 case 10:
281 return "tword";
282 case 16:
283 return "oword";
284 case 32:
285 return "yword";
286 case 64:
287 return "zword";
288 default:
289 return "???";
293 static void warn_overflow(int size)
295 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
296 "%s data exceeds bounds", size_name(size));
299 static void warn_overflow_const(int64_t data, int size)
301 if (overflow_general(data, size))
302 warn_overflow(size);
305 static void warn_overflow_opd(const struct operand *o, int size)
307 if (absolute_op(o)) {
308 if (overflow_general(o->offset, size))
309 warn_overflow(size);
313 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
315 bool err;
317 switch (sign) {
318 case OUT_WRAP:
319 err = overflow_general(data, size);
320 break;
321 case OUT_SIGNED:
322 err = overflow_signed(data, size);
323 break;
324 case OUT_UNSIGNED:
325 err = overflow_unsigned(data, size);
326 break;
327 default:
328 panic();
329 break;
332 if (err)
333 warn_overflow(size);
337 * This routine wraps the real output format's output routine,
338 * in order to pass a copy of the data off to the listing file
339 * generator at the same time, flatten unnecessary relocations,
340 * and verify backend compatibility.
342 static void out(struct out_data *data)
344 static int32_t lineno = 0; /* static!!! */
345 static const char *lnfname = NULL;
346 union {
347 uint8_t b[8];
348 uint64_t q;
349 } xdata;
350 size_t asize, amax;
351 uint64_t zeropad = 0;
352 int64_t addrval;
353 int32_t fixseg; /* Segment for which to produce fixed data */
355 if (!data->size)
356 return; /* Nothing to do */
359 * Convert addresses to RAWDATA if possible
360 * XXX: not all backends want this for global symbols!!!!
362 switch (data->type) {
363 case OUT_ADDRESS:
364 addrval = data->toffset;
365 fixseg = NO_SEG; /* Absolute address is fixed data */
366 goto address;
368 case OUT_RELADDR:
369 addrval = data->toffset - data->relbase;
370 fixseg = data->segment; /* Our own segment is fixed data */
371 goto address;
373 address:
374 nasm_assert(data->size <= 8);
375 asize = data->size;
376 amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
377 if (data->tsegment == fixseg && data->twrt == NO_SEG) {
378 warn_overflow_out(addrval, asize, data->sign);
379 xdata.q = cpu_to_le64(addrval);
380 data->data = xdata.b;
381 data->type = OUT_RAWDATA;
382 asize = amax = 0; /* No longer an address */
384 break;
386 case OUT_SEGMENT:
387 nasm_assert(data->size <= 8);
388 asize = data->size;
389 amax = 2;
390 break;
392 default:
393 asize = amax = 0; /* Not an address */
394 break;
398 * this call to src_get determines when we call the
399 * debug-format-specific "linenum" function
400 * it updates lineno and lnfname to the current values
401 * returning 0 if "same as last time", -2 if lnfname
402 * changed, and the amount by which lineno changed,
403 * if it did. thus, these variables must be static
406 if (src_get(&lineno, &lnfname))
407 dfmt->linenum(lnfname, lineno, data->segment);
409 if (asize > amax) {
410 if (data->type == OUT_RELADDR || data->sign == OUT_SIGNED) {
411 nasm_error(ERR_NONFATAL,
412 "%u-bit signed relocation unsupported by output format %s",
413 (unsigned int)(asize << 3), ofmt->shortname);
414 } else {
415 nasm_error(ERR_WARNING | ERR_WARN_ZEXTRELOC,
416 "%u-bit %s relocation zero-extended from %u bits",
417 (unsigned int)(asize << 3),
418 data->type == OUT_SEGMENT ? "segment" : "unsigned",
419 (unsigned int)(amax << 3));
421 zeropad = data->size - amax;
422 data->size = amax;
424 lfmt->output(data);
425 ofmt->output(data);
426 data->offset += data->size;
427 data->insoffs += data->size;
429 if (zeropad) {
430 data->type = OUT_ZERODATA;
431 data->size = zeropad;
432 lfmt->output(data);
433 ofmt->output(data);
434 data->offset += zeropad;
435 data->insoffs += zeropad;
436 data->size += zeropad; /* Restore original size value */
440 static inline void out_rawdata(struct out_data *data, const void *rawdata,
441 size_t size)
443 data->type = OUT_RAWDATA;
444 data->data = rawdata;
445 data->size = size;
446 out(data);
449 static void out_rawbyte(struct out_data *data, uint8_t byte)
451 data->type = OUT_RAWDATA;
452 data->data = &byte;
453 data->size = 1;
454 out(data);
457 static inline void out_reserve(struct out_data *data, uint64_t size)
459 data->type = OUT_RESERVE;
460 data->size = size;
461 out(data);
464 static void out_segment(struct out_data *data, const struct operand *opx)
466 if (opx->opflags & OPFLAG_RELATIVE)
467 nasm_error(ERR_NONFATAL, "segment references cannot be relative");
469 data->type = OUT_SEGMENT;
470 data->sign = OUT_UNSIGNED;
471 data->size = 2;
472 data->toffset = opx->offset;
473 data->tsegment = ofmt->segbase(opx->segment | 1);
474 data->twrt = opx->wrt;
475 out(data);
478 static void out_imm(struct out_data *data, const struct operand *opx,
479 int size, enum out_sign sign)
481 if (opx->segment != NO_SEG && (opx->segment & 1)) {
483 * This is actually a segment reference, but eval() has
484 * already called ofmt->segbase() for us. Sigh.
486 if (size < 2)
487 nasm_error(ERR_NONFATAL, "segment reference must be 16 bits");
489 data->type = OUT_SEGMENT;
490 } else {
491 data->type = (opx->opflags & OPFLAG_RELATIVE)
492 ? OUT_RELADDR : OUT_ADDRESS;
494 data->sign = sign;
495 data->toffset = opx->offset;
496 data->tsegment = opx->segment;
497 data->twrt = opx->wrt;
499 * XXX: improve this if at some point in the future we can
500 * distinguish the subtrahend in expressions like [foo - bar]
501 * where bar is a symbol in the current segment. However, at the
502 * current point, if OPFLAG_RELATIVE is set that subtraction has
503 * already occurred.
505 data->relbase = 0;
506 data->size = size;
507 out(data);
510 static void out_reladdr(struct out_data *data, const struct operand *opx,
511 int size)
513 if (opx->opflags & OPFLAG_RELATIVE)
514 nasm_error(ERR_NONFATAL, "invalid use of self-relative expression");
516 data->type = OUT_RELADDR;
517 data->sign = OUT_SIGNED;
518 data->size = size;
519 data->toffset = opx->offset;
520 data->tsegment = opx->segment;
521 data->twrt = opx->wrt;
522 data->relbase = data->offset + (data->inslen - data->insoffs);
523 out(data);
526 static bool jmp_match(int32_t segment, int64_t offset, int bits,
527 insn * ins, const struct itemplate *temp)
529 int64_t isize;
530 const uint8_t *code = temp->code;
531 uint8_t c = code[0];
532 bool is_byte;
534 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
535 return false;
536 if (!optimizing)
537 return false;
538 if (optimizing < 0 && c == 0371)
539 return false;
541 isize = calcsize(segment, offset, bits, ins, temp);
543 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
544 /* Be optimistic in pass 1 */
545 return true;
547 if (ins->oprs[0].segment != segment)
548 return false;
550 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
551 is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */
553 if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
554 /* jmp short (opcode eb) cannot be used with bnd prefix. */
555 ins->prefixes[PPS_REP] = P_none;
556 nasm_error(ERR_WARNING | ERR_WARN_BND | ERR_PASS2 ,
557 "jmp short does not init bnd regs - bnd prefix dropped.");
560 return is_byte;
563 /* This is totally just a wild guess at what is reasonable... */
564 #define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
566 int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
568 struct out_data data;
569 const struct itemplate *temp;
570 enum match_result m;
571 int64_t wsize; /* size for DB etc. */
573 nasm_zero(data);
574 data.offset = start;
575 data.segment = segment;
576 data.itemp = NULL;
577 data.bits = bits;
579 wsize = db_bytes(instruction->opcode);
580 if (wsize == -1)
581 return 0;
583 if (wsize) {
584 extop *e;
586 list_for_each(e, instruction->eops) {
587 if (e->type == EOT_DB_NUMBER) {
588 if (wsize > 8) {
589 nasm_error(ERR_NONFATAL,
590 "integer supplied to a DT, DO, DY or DZ"
591 " instruction");
592 } else {
593 data.insoffs = 0;
594 data.inslen = data.size = wsize;
595 data.toffset = e->offset;
596 data.twrt = e->wrt;
597 data.relbase = 0;
598 if (e->segment != NO_SEG && (e->segment & 1)) {
599 data.tsegment = e->segment;
600 data.type = OUT_SEGMENT;
601 data.sign = OUT_UNSIGNED;
602 } else {
603 data.tsegment = e->segment;
604 data.type = e->relative ? OUT_RELADDR : OUT_ADDRESS;
605 data.sign = OUT_WRAP;
607 out(&data);
609 } else if (e->type == EOT_DB_STRING ||
610 e->type == EOT_DB_STRING_FREE) {
611 int align = e->stringlen % wsize;
612 if (align)
613 align = wsize - align;
615 data.insoffs = 0;
616 data.inslen = e->stringlen + align;
618 out_rawdata(&data, e->stringval, e->stringlen);
619 out_rawdata(&data, zero_buffer, align);
622 } else if (instruction->opcode == I_INCBIN) {
623 const char *fname = instruction->eops->stringval;
624 FILE *fp;
625 size_t t = instruction->times; /* INCBIN handles TIMES by itself */
626 off_t base = 0;
627 off_t len;
628 const void *map = NULL;
629 char *buf = NULL;
630 size_t blk = 0; /* Buffered I/O block size */
631 size_t m = 0; /* Bytes last read */
633 fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
634 if (!fp) {
635 nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
636 fname);
637 goto done;
640 len = nasm_file_size(fp);
642 if (len == (off_t)-1) {
643 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
644 fname);
645 goto close_done;
648 if (instruction->eops->next) {
649 base = instruction->eops->next->offset;
650 if (base >= len) {
651 len = 0;
652 } else {
653 len -= base;
654 if (instruction->eops->next->next &&
655 len > (off_t)instruction->eops->next->next->offset)
656 len = (off_t)instruction->eops->next->next->offset;
660 lfmt->set_offset(data.offset);
661 lfmt->uplevel(LIST_INCBIN);
663 if (!len)
664 goto end_incbin;
666 /* Try to map file data */
667 map = nasm_map_file(fp, base, len);
668 if (!map) {
669 blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
670 buf = nasm_malloc(blk);
673 while (t--) {
675 * Consider these irrelevant for INCBIN, since it is fully
676 * possible that these might be (way) bigger than an int
677 * can hold; there is, however, no reason to widen these
678 * types just for INCBIN. data.inslen == 0 signals to the
679 * backend that these fields are meaningless, if at all
680 * needed.
682 data.insoffs = 0;
683 data.inslen = 0;
685 if (map) {
686 out_rawdata(&data, map, len);
687 } else if ((off_t)m == len) {
688 out_rawdata(&data, buf, len);
689 } else {
690 off_t l = len;
692 if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
693 nasm_error(ERR_NONFATAL,
694 "`incbin': unable to seek on file `%s'",
695 fname);
696 goto end_incbin;
698 while (l > 0) {
699 m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
700 if (!m || feof(fp)) {
702 * This shouldn't happen unless the file
703 * actually changes while we are reading
704 * it.
706 nasm_error(ERR_NONFATAL,
707 "`incbin': unexpected EOF while"
708 " reading file `%s'", fname);
709 goto end_incbin;
711 out_rawdata(&data, buf, m);
712 l -= m;
716 end_incbin:
717 lfmt->downlevel(LIST_INCBIN);
718 if (instruction->times > 1) {
719 lfmt->uplevel(LIST_TIMES);
720 lfmt->downlevel(LIST_TIMES);
722 if (ferror(fp)) {
723 nasm_error(ERR_NONFATAL,
724 "`incbin': error while"
725 " reading file `%s'", fname);
727 close_done:
728 if (buf)
729 nasm_free(buf);
730 if (map)
731 nasm_unmap_file(map, len);
732 fclose(fp);
733 done:
734 instruction->times = 1; /* Tell the upper layer not to iterate */
736 } else {
737 /* "Real" instruction */
739 /* Check to see if we need an address-size prefix */
740 add_asp(instruction, bits);
742 m = find_match(&temp, instruction, data.segment, data.offset, bits);
744 if (m == MOK_GOOD) {
745 /* Matches! */
746 int64_t insn_size = calcsize(data.segment, data.offset,
747 bits, instruction, temp);
748 nasm_assert(insn_size >= 0);
750 data.itemp = temp;
751 data.bits = bits;
752 data.insoffs = 0;
753 data.inslen = insn_size;
755 gencode(&data, instruction);
756 nasm_assert(data.insoffs == insn_size);
757 } else {
758 /* No match */
759 switch (m) {
760 case MERR_OPSIZEMISSING:
761 nasm_error(ERR_NONFATAL, "operation size not specified");
762 break;
763 case MERR_OPSIZEMISMATCH:
764 nasm_error(ERR_NONFATAL, "mismatch in operand sizes");
765 break;
766 case MERR_BRNOTHERE:
767 nasm_error(ERR_NONFATAL,
768 "broadcast not permitted on this operand");
769 break;
770 case MERR_BRNUMMISMATCH:
771 nasm_error(ERR_NONFATAL,
772 "mismatch in the number of broadcasting elements");
773 break;
774 case MERR_MASKNOTHERE:
775 nasm_error(ERR_NONFATAL,
776 "mask not permitted on this operand");
777 break;
778 case MERR_DECONOTHERE:
779 nasm_error(ERR_NONFATAL, "unsupported mode decorator for instruction");
780 break;
781 case MERR_BADCPU:
782 nasm_error(ERR_NONFATAL, "no instruction for this cpu level");
783 break;
784 case MERR_BADMODE:
785 nasm_error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
786 bits);
787 break;
788 case MERR_ENCMISMATCH:
789 nasm_error(ERR_NONFATAL, "specific encoding scheme not available");
790 break;
791 case MERR_BADBND:
792 nasm_error(ERR_NONFATAL, "bnd prefix is not allowed");
793 break;
794 case MERR_BADREPNE:
795 nasm_error(ERR_NONFATAL, "%s prefix is not allowed",
796 (has_prefix(instruction, PPS_REP, P_REPNE) ?
797 "repne" : "repnz"));
798 break;
799 default:
800 nasm_error(ERR_NONFATAL,
801 "invalid combination of opcode and operands");
802 break;
805 instruction->times = 1; /* Avoid repeated error messages */
808 return data.offset - start;
811 int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
813 const struct itemplate *temp;
814 enum match_result m;
816 if (instruction->opcode == I_none)
817 return 0;
819 if (opcode_is_db(instruction->opcode)) {
820 extop *e;
821 int32_t isize, osize, wsize;
823 isize = 0;
824 wsize = db_bytes(instruction->opcode);
825 nasm_assert(wsize > 0);
827 list_for_each(e, instruction->eops) {
828 int32_t align;
830 osize = 0;
831 if (e->type == EOT_DB_NUMBER) {
832 osize = 1;
833 warn_overflow_const(e->offset, wsize);
834 } else if (e->type == EOT_DB_STRING ||
835 e->type == EOT_DB_STRING_FREE)
836 osize = e->stringlen;
838 align = (-osize) % wsize;
839 if (align < 0)
840 align += wsize;
841 isize += osize + align;
843 return isize;
846 if (instruction->opcode == I_INCBIN) {
847 const char *fname = instruction->eops->stringval;
848 off_t len;
850 len = nasm_file_size_by_path(fname);
851 if (len == (off_t)-1) {
852 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
853 fname);
854 return 0;
857 if (instruction->eops->next) {
858 if (len <= (off_t)instruction->eops->next->offset) {
859 len = 0;
860 } else {
861 len -= instruction->eops->next->offset;
862 if (instruction->eops->next->next &&
863 len > (off_t)instruction->eops->next->next->offset) {
864 len = (off_t)instruction->eops->next->next->offset;
869 len *= instruction->times;
870 instruction->times = 1; /* Tell the upper layer to not iterate */
872 return len;
875 /* Check to see if we need an address-size prefix */
876 add_asp(instruction, bits);
878 m = find_match(&temp, instruction, segment, offset, bits);
879 if (m == MOK_GOOD) {
880 /* we've matched an instruction. */
881 return calcsize(segment, offset, bits, instruction, temp);
882 } else {
883 return -1; /* didn't match any instruction */
887 static void bad_hle_warn(const insn * ins, uint8_t hleok)
889 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
890 enum whatwarn { w_none, w_lock, w_inval } ww;
891 static const enum whatwarn warn[2][4] =
893 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
894 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
896 unsigned int n;
898 n = (unsigned int)rep_pfx - P_XACQUIRE;
899 if (n > 1)
900 return; /* Not XACQUIRE/XRELEASE */
902 ww = warn[n][hleok];
903 if (!is_class(MEMORY, ins->oprs[0].type))
904 ww = w_inval; /* HLE requires operand 0 to be memory */
906 switch (ww) {
907 case w_none:
908 break;
910 case w_lock:
911 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
912 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
913 "%s with this instruction requires lock",
914 prefix_name(rep_pfx));
916 break;
918 case w_inval:
919 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
920 "%s invalid with this instruction",
921 prefix_name(rep_pfx));
922 break;
926 /* Common construct */
927 #define case3(x) case (x): case (x)+1: case (x)+2
928 #define case4(x) case3(x): case (x)+3
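/*
 * E.g. "case4(020):" expands to "case 020: case 021: case 022: case 023:",
 * covering the four operand slots 0..3 used throughout the byte-code table.
 */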
930 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
931 insn * ins, const struct itemplate *temp)
933 const uint8_t *codes = temp->code;
934 int64_t length = 0;
935 uint8_t c;
936 int rex_mask = ~0;
937 int op1, op2;
938 struct operand *opx;
939 uint8_t opex = 0;
940 enum ea_type eat;
941 uint8_t hleok = 0;
942 bool lockcheck = true;
943 enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
944 const char *errmsg;
946 ins->rex = 0; /* Ensure REX is reset */
947 eat = EA_SCALAR; /* Expect a scalar EA */
948 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
950 if (ins->prefixes[PPS_OSIZE] == P_O64)
951 ins->rex |= REX_W;
953 (void)segment; /* Don't warn that this parameter is unused */
954 (void)offset; /* Don't warn that this parameter is unused */
956 while (*codes) {
957 c = *codes++;
958 op1 = (c & 3) + ((opex & 1) << 2);
959 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
960 opx = &ins->oprs[op1];
961 opex = 0; /* For the next iteration */
963 switch (c) {
964 case4(01):
965 codes += c, length += c;
966 break;
968 case3(05):
969 opex = c;
970 break;
972 case4(010):
973 ins->rex |=
974 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
975 codes++, length++;
976 break;
978 case4(014):
979 /* this is an index reg of MIB operand */
980 mib_index = opx->basereg;
981 break;
983 case4(020):
984 case4(024):
985 length++;
986 break;
988 case4(030):
989 length += 2;
990 break;
992 case4(034):
993 if (opx->type & (BITS16 | BITS32 | BITS64))
994 length += (opx->type & BITS16) ? 2 : 4;
995 else
996 length += (bits == 16) ? 2 : 4;
997 break;
999 case4(040):
1000 length += 4;
1001 break;
1003 case4(044):
1004 length += ins->addr_size >> 3;
1005 break;
1007 case4(050):
1008 length++;
1009 break;
1011 case4(054):
1012 length += 8; /* MOV reg64/imm */
1013 break;
1015 case4(060):
1016 length += 2;
1017 break;
1019 case4(064):
1020 if (opx->type & (BITS16 | BITS32 | BITS64))
1021 length += (opx->type & BITS16) ? 2 : 4;
1022 else
1023 length += (bits == 16) ? 2 : 4;
1024 break;
1026 case4(070):
1027 length += 4;
1028 break;
1030 case4(074):
1031 length += 2;
1032 break;
1034 case 0172:
1035 case 0173:
1036 codes++;
1037 length++;
1038 break;
1040 case4(0174):
1041 length++;
1042 break;
1044 case4(0240):
1045 ins->rex |= REX_EV;
1046 ins->vexreg = regval(opx);
1047 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
1048 ins->vex_cm = *codes++;
1049 ins->vex_wlp = *codes++;
1050 ins->evex_tuple = (*codes++ - 0300);
1051 break;
1053 case 0250:
1054 ins->rex |= REX_EV;
1055 ins->vexreg = 0;
1056 ins->vex_cm = *codes++;
1057 ins->vex_wlp = *codes++;
1058 ins->evex_tuple = (*codes++ - 0300);
1059 break;
1061 case4(0254):
1062 length += 4;
1063 break;
1065 case4(0260):
1066 ins->rex |= REX_V;
1067 ins->vexreg = regval(opx);
1068 ins->vex_cm = *codes++;
1069 ins->vex_wlp = *codes++;
1070 break;
1072 case 0270:
1073 ins->rex |= REX_V;
1074 ins->vexreg = 0;
1075 ins->vex_cm = *codes++;
1076 ins->vex_wlp = *codes++;
1077 break;
1079 case3(0271):
1080 hleok = c & 3;
1081 break;
1083 case4(0274):
1084 length++;
1085 break;
1087 case4(0300):
1088 break;
1090 case 0310:
1091 if (bits == 64)
1092 return -1;
1093 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
1094 break;
1096 case 0311:
1097 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1098 break;
1100 case 0312:
1101 break;
1103 case 0313:
1104 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1105 has_prefix(ins, PPS_ASIZE, P_A32))
1106 return -1;
1107 break;
1109 case4(0314):
1110 break;
1112 case 0320:
1114 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1115 if (pfx == P_O16)
1116 break;
1117 if (pfx != P_none)
1118 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1119 else
1120 ins->prefixes[PPS_OSIZE] = P_O16;
1121 break;
1124 case 0321:
1126 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1127 if (pfx == P_O32)
1128 break;
1129 if (pfx != P_none)
1130 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1131 else
1132 ins->prefixes[PPS_OSIZE] = P_O32;
1133 break;
1136 case 0322:
1137 break;
1139 case 0323:
1140 rex_mask &= ~REX_W;
1141 break;
1143 case 0324:
1144 ins->rex |= REX_W;
1145 break;
1147 case 0325:
1148 ins->rex |= REX_NH;
1149 break;
1151 case 0326:
1152 break;
1154 case 0330:
1155 codes++, length++;
1156 break;
1158 case 0331:
1159 break;
1161 case 0332:
1162 case 0333:
1163 length++;
1164 break;
1166 case 0334:
1167 ins->rex |= REX_L;
1168 break;
1170 case 0335:
1171 break;
1173 case 0336:
1174 if (!ins->prefixes[PPS_REP])
1175 ins->prefixes[PPS_REP] = P_REP;
1176 break;
1178 case 0337:
1179 if (!ins->prefixes[PPS_REP])
1180 ins->prefixes[PPS_REP] = P_REPNE;
1181 break;
1183 case 0340:
1184 if (!absolute_op(&ins->oprs[0]))
1185 nasm_error(ERR_NONFATAL, "attempt to reserve non-constant"
1186 " quantity of BSS space");
1187 else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
1188 nasm_error(ERR_WARNING | ERR_PASS1,
1189 "forward reference in RESx can have unpredictable results");
1190 else
1191 length += ins->oprs[0].offset;
1192 break;
1194 case 0341:
1195 if (!ins->prefixes[PPS_WAIT])
1196 ins->prefixes[PPS_WAIT] = P_WAIT;
1197 break;
1199 case 0360:
1200 break;
1202 case 0361:
1203 length++;
1204 break;
1206 case 0364:
1207 case 0365:
1208 break;
1210 case 0366:
1211 case 0367:
1212 length++;
1213 break;
1215 case 0370:
1216 case 0371:
1217 break;
1219 case 0373:
1220 length++;
1221 break;
1223 case 0374:
1224 eat = EA_XMMVSIB;
1225 break;
1227 case 0375:
1228 eat = EA_YMMVSIB;
1229 break;
1231 case 0376:
1232 eat = EA_ZMMVSIB;
1233 break;
1235 case4(0100):
1236 case4(0110):
1237 case4(0120):
1238 case4(0130):
1239 case4(0200):
1240 case4(0204):
1241 case4(0210):
1242 case4(0214):
1243 case4(0220):
1244 case4(0224):
1245 case4(0230):
1246 case4(0234):
1248 ea ea_data;
1249 int rfield;
1250 opflags_t rflags;
1251 struct operand *opy = &ins->oprs[op2];
1252 struct operand *op_er_sae;
1254 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1256 if (c <= 0177) {
1257 /* pick rfield from operand b (opx) */
1258 rflags = regflag(opx);
1259 rfield = nasm_regvals[opx->basereg];
1260 } else {
1261 rflags = 0;
1262 rfield = c & 7;
1265 /* EVEX.b1 : evex_brerop contains the operand position */
1266 op_er_sae = (ins->evex_brerop >= 0 ?
1267 &ins->oprs[ins->evex_brerop] : NULL);
1269 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1270 /* set EVEX.b */
1271 ins->evex_p[2] |= EVEX_P2B;
1272 if (op_er_sae->decoflags & ER) {
1273 /* set EVEX.RC (rounding control) */
1274 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1275 & EVEX_P2RC;
1277 } else {
1278 /* set EVEX.L'L (vector length) */
1279 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1280 ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
1281 if (opy->decoflags & BRDCAST_MASK) {
1282 /* set EVEX.b */
1283 ins->evex_p[2] |= EVEX_P2B;
1287 if (itemp_has(temp, IF_MIB)) {
1288 opy->eaflags |= EAF_MIB;
1290 * if a separate form of MIB (ICC style) is used,
1291 * the index reg info is merged into mem operand
1293 if (mib_index != R_none) {
1294 opy->indexreg = mib_index;
1295 opy->scale = 1;
1296 opy->hintbase = mib_index;
1297 opy->hinttype = EAH_NOTBASE;
1301 if (process_ea(opy, &ea_data, bits,
1302 rfield, rflags, ins, &errmsg) != eat) {
1303 nasm_error(ERR_NONFATAL, "%s", errmsg);
1304 return -1;
1305 } else {
1306 ins->rex |= ea_data.rex;
1307 length += ea_data.size;
1310 break;
1312 default:
1313 nasm_panic(0, "internal instruction table corrupt"
1314 ": instruction code \\%o (0x%02X) given", c, c);
1315 break;
1319 ins->rex &= rex_mask;
1321 if (ins->rex & REX_NH) {
1322 if (ins->rex & REX_H) {
1323 nasm_error(ERR_NONFATAL, "instruction cannot use high registers");
1324 return -1;
1326 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1329 switch (ins->prefixes[PPS_VEX]) {
1330 case P_EVEX:
1331 if (!(ins->rex & REX_EV))
1332 return -1;
1333 break;
1334 case P_VEX3:
1335 case P_VEX2:
1336 if (!(ins->rex & REX_V))
1337 return -1;
1338 break;
1339 default:
1340 break;
1343 if (ins->rex & (REX_V | REX_EV)) {
1344 int bad32 = REX_R|REX_W|REX_X|REX_B;
1346 if (ins->rex & REX_H) {
1347 nasm_error(ERR_NONFATAL, "cannot use high register in AVX instruction");
1348 return -1;
1350 switch (ins->vex_wlp & 060) {
1351 case 000:
1352 case 040:
1353 ins->rex &= ~REX_W;
1354 break;
1355 case 020:
1356 ins->rex |= REX_W;
1357 bad32 &= ~REX_W;
1358 break;
1359 case 060:
1360 /* Follow REX_W */
1361 break;
1364 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1365 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1366 return -1;
1367 } else if (!(ins->rex & REX_EV) &&
1368 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1369 nasm_error(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1370 return -1;
1372 if (ins->rex & REX_EV)
1373 length += 4;
1374 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1375 ins->prefixes[PPS_VEX] == P_VEX3)
1376 length += 3;
1377 else
1378 length += 2;
1379 } else if (ins->rex & REX_MASK) {
1380 if (ins->rex & REX_H) {
1381 nasm_error(ERR_NONFATAL, "cannot use high register in rex instruction");
1382 return -1;
1383 } else if (bits == 64) {
1384 length++;
1385 } else if ((ins->rex & REX_L) &&
1386 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1387 iflag_ffs(&cpu) >= IF_X86_64) {
1388 /* LOCK-as-REX.R */
1389 assert_no_prefix(ins, PPS_LOCK);
1390 lockcheck = false; /* Already errored, no need for warning */
1391 length++;
1392 } else {
1393 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1394 return -1;
1398 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1399 (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1400 nasm_error(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1401 "instruction is not lockable");
1404 bad_hle_warn(ins, hleok);
1407 * when BND prefix is set by DEFAULT directive,
1408 * BND prefix is added to every appropriate instruction line
1409 * unless it is overridden by NOBND prefix.
1411 if (globalbnd &&
1412 (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
1413 ins->prefixes[PPS_REP] = P_BND;
1416 * Add length of legacy prefixes
1418 length += emit_prefix(NULL, bits, ins);
1420 return length;
1423 static inline void emit_rex(struct out_data *data, insn *ins)
1425 if (data->bits == 64) {
1426 if ((ins->rex & REX_MASK) &&
1427 !(ins->rex & (REX_V | REX_EV)) &&
1428 !ins->rex_done) {
1429 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1430 out_rawbyte(data, rex);
1431 ins->rex_done = true;
1436 static int emit_prefix(struct out_data *data, const int bits, insn *ins)
1438 int bytes = 0;
1439 int j;
1441 for (j = 0; j < MAXPREFIX; j++) {
1442 uint8_t c = 0;
1443 switch (ins->prefixes[j]) {
1444 case P_WAIT:
1445 c = 0x9B;
1446 break;
1447 case P_LOCK:
1448 c = 0xF0;
1449 break;
1450 case P_REPNE:
1451 case P_REPNZ:
1452 case P_XACQUIRE:
1453 case P_BND:
1454 c = 0xF2;
1455 break;
1456 case P_REPE:
1457 case P_REPZ:
1458 case P_REP:
1459 case P_XRELEASE:
1460 c = 0xF3;
1461 break;
1462 case R_CS:
1463 if (bits == 64) {
1464 nasm_error(ERR_WARNING | ERR_PASS2,
1465 "cs segment base generated, but will be ignored in 64-bit mode");
1467 c = 0x2E;
1468 break;
1469 case R_DS:
1470 if (bits == 64) {
1471 nasm_error(ERR_WARNING | ERR_PASS2,
1472 "ds segment base generated, but will be ignored in 64-bit mode");
1474 c = 0x3E;
1475 break;
1476 case R_ES:
1477 if (bits == 64) {
1478 nasm_error(ERR_WARNING | ERR_PASS2,
1479 "es segment base generated, but will be ignored in 64-bit mode");
1481 c = 0x26;
1482 break;
1483 case R_FS:
1484 c = 0x64;
1485 break;
1486 case R_GS:
1487 c = 0x65;
1488 break;
1489 case R_SS:
1490 if (bits == 64) {
1491 nasm_error(ERR_WARNING | ERR_PASS2,
1492 "ss segment base generated, but will be ignored in 64-bit mode");
1494 c = 0x36;
1495 break;
1496 case R_SEGR6:
1497 case R_SEGR7:
1498 nasm_error(ERR_NONFATAL,
1499 "segr6 and segr7 cannot be used as prefixes");
1500 break;
1501 case P_A16:
1502 if (bits == 64) {
1503 nasm_error(ERR_NONFATAL,
1504 "16-bit addressing is not supported "
1505 "in 64-bit mode");
1506 } else if (bits != 16)
1507 c = 0x67;
1508 break;
1509 case P_A32:
1510 if (bits != 32)
1511 c = 0x67;
1512 break;
1513 case P_A64:
1514 if (bits != 64) {
1515 nasm_error(ERR_NONFATAL,
1516 "64-bit addressing is only supported "
1517 "in 64-bit mode");
1519 break;
1520 case P_ASP:
1521 c = 0x67;
1522 break;
1523 case P_O16:
1524 if (bits != 16)
1525 c = 0x66;
1526 break;
1527 case P_O32:
1528 if (bits == 16)
1529 c = 0x66;
1530 break;
1531 case P_O64:
1532 /* REX.W */
1533 break;
1534 case P_OSP:
1535 c = 0x66;
1536 break;
1537 case P_EVEX:
1538 case P_VEX3:
1539 case P_VEX2:
1540 case P_NOBND:
1541 case P_none:
1542 break;
1543 default:
1544 nasm_panic(0, "invalid instruction prefix");
1546 if (c) {
1547 if (data)
1548 out_rawbyte(data, c);
1549 bytes++;
1552 return bytes;
1555 static void gencode(struct out_data *data, insn *ins)
1557 uint8_t c;
1558 uint8_t bytes[4];
1559 int64_t size;
1560 int op1, op2;
1561 struct operand *opx;
1562 const uint8_t *codes = data->itemp->code;
1563 uint8_t opex = 0;
1564 enum ea_type eat = EA_SCALAR;
1565 int r;
1566 const int bits = data->bits;
1567 const char *errmsg;
1569 ins->rex_done = false;
1571 emit_prefix(data, bits, ins);
1573 while (*codes) {
1574 c = *codes++;
1575 op1 = (c & 3) + ((opex & 1) << 2);
1576 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1577 opx = &ins->oprs[op1];
1578 opex = 0; /* For the next iteration */
1581 switch (c) {
1582 case 01:
1583 case 02:
1584 case 03:
1585 case 04:
1586 emit_rex(data, ins);
1587 out_rawdata(data, codes, c);
1588 codes += c;
1589 break;
1591 case 05:
1592 case 06:
1593 case 07:
1594 opex = c;
1595 break;
1597 case4(010):
1598 emit_rex(data, ins);
1599 out_rawbyte(data, *codes++ + (regval(opx) & 7));
1600 break;
1602 case4(014):
1603 break;
1605 case4(020):
1606 if (opx->offset < -256 || opx->offset > 255)
1607 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1608 "byte value exceeds bounds");
1609 out_imm(data, opx, 1, OUT_WRAP);
1610 break;
1612 case4(024):
1613 if (opx->offset < 0 || opx->offset > 255)
1614 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1615 "unsigned byte value exceeds bounds");
1616 out_imm(data, opx, 1, OUT_UNSIGNED);
1617 break;
1619 case4(030):
1620 warn_overflow_opd(opx, 2);
1621 out_imm(data, opx, 2, OUT_WRAP);
1622 break;
1624 case4(034):
1625 if (opx->type & (BITS16 | BITS32))
1626 size = (opx->type & BITS16) ? 2 : 4;
1627 else
1628 size = (bits == 16) ? 2 : 4;
1629 warn_overflow_opd(opx, size);
1630 out_imm(data, opx, size, OUT_WRAP);
1631 break;
1633 case4(040):
1634 warn_overflow_opd(opx, 4);
1635 out_imm(data, opx, 4, OUT_WRAP);
1636 break;
1638 case4(044):
1639 size = ins->addr_size >> 3;
1640 warn_overflow_opd(opx, size);
1641 out_imm(data, opx, size, OUT_WRAP);
1642 break;
1644 case4(050):
1645 if (opx->segment == data->segment) {
1646 int64_t delta = opx->offset - data->offset
1647 - (data->inslen - data->insoffs);
1648 if (delta > 127 || delta < -128)
1649 nasm_error(ERR_NONFATAL, "short jump is out of range");
1651 out_reladdr(data, opx, 1);
1652 break;
1654 case4(054):
1655 out_imm(data, opx, 8, OUT_WRAP);
1656 break;
1658 case4(060):
1659 out_reladdr(data, opx, 2);
1660 break;
1662 case4(064):
1663 if (opx->type & (BITS16 | BITS32 | BITS64))
1664 size = (opx->type & BITS16) ? 2 : 4;
1665 else
1666 size = (bits == 16) ? 2 : 4;
1668 out_reladdr(data, opx, size);
1669 break;
1671 case4(070):
1672 out_reladdr(data, opx, 4);
1673 break;
1675 case4(074):
1676 if (opx->segment == NO_SEG)
1677 nasm_error(ERR_NONFATAL, "value referenced by FAR is not"
1678 " relocatable");
1679 out_segment(data, opx);
1680 break;
1682 case 0172:
1684 int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
1685 const struct operand *opy;
1687 c = *codes++;
1688 opx = &ins->oprs[c >> 3];
1689 opy = &ins->oprs[c & 7];
1690 if (!absolute_op(opy)) {
1691 nasm_error(ERR_NONFATAL,
1692 "non-absolute expression not permitted as argument %d",
1693 c & 7);
1694 } else if (opy->offset & ~mask) {
1695 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1696 "is4 argument exceeds bounds");
1698 c = opy->offset & mask;
1699 goto emit_is4;
1702 case 0173:
1703 c = *codes++;
1704 opx = &ins->oprs[c >> 4];
1705 c &= 15;
1706 goto emit_is4;
1708 case4(0174):
1709 c = 0;
1710 emit_is4:
1711 r = nasm_regvals[opx->basereg];
1712 out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
1713 break;
1715 case4(0254):
1716 if (absolute_op(opx) &&
1717 (int32_t)opx->offset != (int64_t)opx->offset) {
1718 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1719 "signed dword immediate exceeds bounds");
1721 out_imm(data, opx, 4, OUT_SIGNED);
1722 break;
1724 case4(0240):
1725 case 0250:
1726 codes += 3;
1727 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1728 EVEX_P2Z | EVEX_P2AAA, 2);
1729 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1730 bytes[0] = 0x62;
1731 /* EVEX.X can be set by either REX or EVEX for different reasons */
1732 bytes[1] = ((((ins->rex & 7) << 5) |
1733 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
1734 (ins->vex_cm & EVEX_P0MM);
1735 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1736 ((~ins->vexreg & 15) << 3) |
1737 (1 << 2) | (ins->vex_wlp & 3);
1738 bytes[3] = ins->evex_p[2];
1739 out_rawdata(data, bytes, 4);
1740 break;
1742 case4(0260):
1743 case 0270:
1744 codes += 2;
1745 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1746 ins->prefixes[PPS_VEX] == P_VEX3) {
1747 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1748 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1749 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
1750 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
1751 out_rawdata(data, bytes, 3);
1752 } else {
1753 bytes[0] = 0xc5;
1754 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
1755 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
1756 out_rawdata(data, bytes, 2);
1758 break;
1760 case 0271:
1761 case 0272:
1762 case 0273:
1763 break;
1765 case4(0274):
1767 uint64_t uv, um;
1768 int s;
1770 if (absolute_op(opx)) {
1771 if (ins->rex & REX_W)
1772 s = 64;
1773 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1774 s = 16;
1775 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1776 s = 32;
1777 else
1778 s = bits;
1780 um = (uint64_t)2 << (s-1);
1781 uv = opx->offset;
1783 if (uv > 127 && uv < (uint64_t)-128 &&
1784 (uv < um-128 || uv > um-1)) {
1785 /* If this wasn't explicitly byte-sized, warn as though we
1786 * had fallen through to the imm16/32/64 case.
1788 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1789 "%s value exceeds bounds",
1790 (opx->type & BITS8) ? "signed byte" :
1791 s == 16 ? "word" :
1792 s == 32 ? "dword" :
1793 "signed dword");
1796 /* Output as a raw byte to avoid byte overflow check */
1797 out_rawbyte(data, (uint8_t)uv);
1798 } else {
1799 out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
1801 break;
1804 case4(0300):
1805 break;
1807 case 0310:
1808 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
1809 out_rawbyte(data, 0x67);
1810 break;
1812 case 0311:
1813 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
1814 out_rawbyte(data, 0x67);
1815 break;
1817 case 0312:
1818 break;
1820 case 0313:
1821 ins->rex = 0;
1822 break;
1824 case4(0314):
1825 break;
1827 case 0320:
1828 case 0321:
1829 break;
1831 case 0322:
1832 case 0323:
1833 break;
1835 case 0324:
1836 ins->rex |= REX_W;
1837 break;
1839 case 0325:
1840 break;
1842 case 0326:
1843 break;
1845 case 0330:
1846 out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
1847 break;
1849 case 0331:
1850 break;
1852 case 0332:
1853 case 0333:
1854 out_rawbyte(data, c - 0332 + 0xF2);
1855 break;
1857 case 0334:
1858 if (ins->rex & REX_R)
1859 out_rawbyte(data, 0xF0);
1860 ins->rex &= ~(REX_L|REX_R);
1861 break;
1863 case 0335:
1864 break;
1866 case 0336:
1867 case 0337:
1868 break;
1870 case 0340:
1871 if (ins->oprs[0].segment != NO_SEG)
1872 nasm_panic(0, "non-constant BSS size in pass two");
1874 out_reserve(data, ins->oprs[0].offset);
1875 break;
1877 case 0341:
1878 break;
1880 case 0360:
1881 break;
1883 case 0361:
1884 out_rawbyte(data, 0x66);
1885 break;
1887 case 0364:
1888 case 0365:
1889 break;
1891 case 0366:
1892 case 0367:
1893 out_rawbyte(data, c - 0366 + 0x66);
1894 break;
1896 case3(0370):
1897 break;
1899 case 0373:
1900 out_rawbyte(data, bits == 16 ? 3 : 5);
1901 break;
1903 case 0374:
1904 eat = EA_XMMVSIB;
1905 break;
1907 case 0375:
1908 eat = EA_YMMVSIB;
1909 break;
1911 case 0376:
1912 eat = EA_ZMMVSIB;
1913 break;
1915 case4(0100):
1916 case4(0110):
1917 case4(0120):
1918 case4(0130):
1919 case4(0200):
1920 case4(0204):
1921 case4(0210):
1922 case4(0214):
1923 case4(0220):
1924 case4(0224):
1925 case4(0230):
1926 case4(0234):
1928 ea ea_data;
1929 int rfield;
1930 opflags_t rflags;
1931 uint8_t *p;
1932 struct operand *opy = &ins->oprs[op2];
1934 if (c <= 0177) {
1935 /* pick rfield from operand b (opx) */
1936 rflags = regflag(opx);
1937 rfield = nasm_regvals[opx->basereg];
1938 } else {
1939 /* rfield is constant */
1940 rflags = 0;
1941 rfield = c & 7;
1944 if (process_ea(opy, &ea_data, bits,
1945 rfield, rflags, ins, &errmsg) != eat)
1946 nasm_error(ERR_NONFATAL, "%s", errmsg);
1948 p = bytes;
1949 *p++ = ea_data.modrm;
1950 if (ea_data.sib_present)
1951 *p++ = ea_data.sib;
1952 out_rawdata(data, bytes, p - bytes);
1955 * Make sure the address gets the right offset in case
1956 * the line breaks in the .lst file (BR 1197827)
1959 if (ea_data.bytes) {
1960 /* use compressed displacement, if available */
1961 if (ea_data.disp8) {
1962 out_rawbyte(data, ea_data.disp8);
1963 } else if (ea_data.rip) {
1964 out_reladdr(data, opy, ea_data.bytes);
1965 } else {
1966 int asize = ins->addr_size >> 3;
1968 if (overflow_general(opy->offset, asize) ||
1969 signed_bits(opy->offset, ins->addr_size) !=
1970 signed_bits(opy->offset, ea_data.bytes << 3))
1971 warn_overflow(ea_data.bytes);
1973 out_imm(data, opy, ea_data.bytes,
1974 (asize > ea_data.bytes)
1975 ? OUT_SIGNED : OUT_WRAP);
1979 break;
1981 default:
1982 nasm_panic(0, "internal instruction table corrupt"
1983 ": instruction code \\%o (0x%02X) given", c, c);
1984 break;
1989 static opflags_t regflag(const operand * o)
1991 if (!is_register(o->basereg))
1992 nasm_panic(0, "invalid operand passed to regflag()");
1993 return nasm_reg_flags[o->basereg];
1996 static int32_t regval(const operand * o)
1998 if (!is_register(o->basereg))
1999 nasm_panic(0, "invalid operand passed to regval()");
2000 return nasm_regvals[o->basereg];
2003 static int op_rexflags(const operand * o, int mask)
2005 opflags_t flags;
2006 int val;
2008 if (!is_register(o->basereg))
2009 nasm_panic(0, "invalid operand passed to op_rexflags()");
2011 flags = nasm_reg_flags[o->basereg];
2012 val = nasm_regvals[o->basereg];
2014 return rexflags(val, flags, mask);
2017 static int rexflags(int val, opflags_t flags, int mask)
2019 int rex = 0;
2021 if (val >= 0 && (val & 8))
2022 rex |= REX_B|REX_X|REX_R;
2023 if (flags & BITS64)
2024 rex |= REX_W;
2025 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
2026 rex |= REX_H;
2027 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
2028 rex |= REX_P;
2030 return rex & mask;
2033 static int evexflags(int val, decoflags_t deco,
2034 int mask, uint8_t byte)
2036 int evex = 0;
2038 switch (byte) {
2039 case 0:
2040 if (val >= 0 && (val & 16))
2041 evex |= (EVEX_P0RP | EVEX_P0X);
2042 break;
2043 case 2:
2044 if (val >= 0 && (val & 16))
2045 evex |= EVEX_P2VP;
2046 if (deco & Z)
2047 evex |= EVEX_P2Z;
2048 if (deco & OPMASK_MASK)
2049 evex |= deco & EVEX_P2AAA;
2050 break;
2052 return evex & mask;
2055 static int op_evexflags(const operand * o, int mask, uint8_t byte)
2057 int val;
2059 val = nasm_regvals[o->basereg];
2061 return evexflags(val, o->decoflags, mask, byte);
2064 static enum match_result find_match(const struct itemplate **tempp,
2065 insn *instruction,
2066 int32_t segment, int64_t offset, int bits)
2068 const struct itemplate *temp;
2069 enum match_result m, merr;
2070 opflags_t xsizeflags[MAX_OPERANDS];
2071 bool opsizemissing = false;
2072 int8_t broadcast = instruction->evex_brerop;
2073 int i;
2075 /* broadcasting uses a different data element size */
2076 for (i = 0; i < instruction->operands; i++)
2077 if (i == broadcast)
2078 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
2079 else
2080 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
2082 merr = MERR_INVALOP;
2084 for (temp = nasm_instructions[instruction->opcode];
2085 temp->opcode != I_none; temp++) {
2086 m = matches(temp, instruction, bits);
2087 if (m == MOK_JUMP) {
2088 if (jmp_match(segment, offset, bits, instruction, temp))
2089 m = MOK_GOOD;
2090 else
2091 m = MERR_INVALOP;
2092 } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
2094 * Missing operand size and a candidate for fuzzy matching...
2096 for (i = 0; i < temp->operands; i++)
2097 if (i == broadcast)
2098 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
2099 else
2100 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
2101 opsizemissing = true;
2103 if (m > merr)
2104 merr = m;
2105 if (merr == MOK_GOOD)
2106 goto done;
2109 /* No match, but see if we can get a fuzzy operand size match... */
2110 if (!opsizemissing)
2111 goto done;
2113 for (i = 0; i < instruction->operands; i++) {
2115 * We ignore extrinsic operand sizes on registers, so we should
2116 * never try to fuzzy-match on them. This also resolves the case
2117 * when we have e.g. "xmmrm128" in two different positions.
2119 if (is_class(REGISTER, instruction->oprs[i].type))
2120 continue;
2122 /* This tests if xsizeflags[i] has more than one bit set */
2123 if ((xsizeflags[i] & (xsizeflags[i]-1)))
2124 goto done; /* No luck */
2126 if (i == broadcast) {
2127 instruction->oprs[i].decoflags |= xsizeflags[i];
2128 instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
2129 BITS32 : BITS64);
2130 } else {
2131 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
2135 /* Try matching again... */
2136 for (temp = nasm_instructions[instruction->opcode];
2137 temp->opcode != I_none; temp++) {
2138 m = matches(temp, instruction, bits);
2139 if (m == MOK_JUMP) {
2140 if (jmp_match(segment, offset, bits, instruction, temp))
2141 m = MOK_GOOD;
2142 else
2143 m = MERR_INVALOP;
2145 if (m > merr)
2146 merr = m;
2147 if (merr == MOK_GOOD)
2148 goto done;
2151 done:
2152 *tempp = temp;
2153 return merr;
2156 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2158 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
2159 uint8_t brcast_num;
2161 if (brsize > BITS64)
2162 nasm_error(ERR_FATAL,
2163 "size of broadcasting element is greater than 64 bits");
2166 * The shift term is to take care of the extra BITS80 inserted
2167 * between BITS64 and BITS128.
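* E.g. a 256-bit operand broadcast from 64-bit elements yields
* brcast_num = 4 ({1to4}), and a 512-bit operand broadcast from
* 32-bit elements yields 16 ({1to16}).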
2169 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
2170 >> (opsize > (BITS64 >> SIZE_SHIFT));
2172 return brcast_num;
2175 static enum match_result matches(const struct itemplate *itemp,
2176 insn *instruction, int bits)
2178 opflags_t size[MAX_OPERANDS], asize;
2179 bool opsizemissing = false;
2180 int i, oprs;
2183 * Check the opcode
2185 if (itemp->opcode != instruction->opcode)
2186 return MERR_INVALOP;
2189 * Count the operands
2191 if (itemp->operands != instruction->operands)
2192 return MERR_INVALOP;
2195 * Is it legal?
2197 if (!(optimizing > 0) && itemp_has(itemp, IF_OPT))
2198 return MERR_INVALOP;
2201 * {evex} available?
2203 switch (instruction->prefixes[PPS_VEX]) {
2204 case P_EVEX:
2205 if (!itemp_has(itemp, IF_EVEX))
2206 return MERR_ENCMISMATCH;
2207 break;
2208 case P_VEX3:
2209 case P_VEX2:
2210 if (!itemp_has(itemp, IF_VEX))
2211 return MERR_ENCMISMATCH;
2212 break;
2213 default:
2214 break;
2218 * Check that no spurious colons or TOs are present
2220 for (i = 0; i < itemp->operands; i++)
2221 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
2222 return MERR_INVALOP;
2225 * Process size flags
2227 switch (itemp_smask(itemp)) {
2228 case IF_GENBIT(IF_SB):
2229 asize = BITS8;
2230 break;
2231 case IF_GENBIT(IF_SW):
2232 asize = BITS16;
2233 break;
2234 case IF_GENBIT(IF_SD):
2235 asize = BITS32;
2236 break;
2237 case IF_GENBIT(IF_SQ):
2238 asize = BITS64;
2239 break;
2240 case IF_GENBIT(IF_SO):
2241 asize = BITS128;
2242 break;
2243 case IF_GENBIT(IF_SY):
2244 asize = BITS256;
2245 break;
2246 case IF_GENBIT(IF_SZ):
2247 asize = BITS512;
2248 break;
2249 case IF_GENBIT(IF_SIZE):
2250 switch (bits) {
2251 case 16:
2252 asize = BITS16;
2253 break;
2254 case 32:
2255 asize = BITS32;
2256 break;
2257 case 64:
2258 asize = BITS64;
2259 break;
2260 default:
2261 asize = 0;
2262 break;
2264 break;
2265 default:
2266 asize = 0;
2267 break;
2270 if (itemp_armask(itemp)) {
2271 /* S- flags only apply to a specific operand */
2272 i = itemp_arg(itemp);
2273 memset(size, 0, sizeof size);
2274 size[i] = asize;
2275 } else {
2276 /* S- flags apply to all operands */
2277 for (i = 0; i < MAX_OPERANDS; i++)
2278 size[i] = asize;
2282 * Check that the operand flags all match up;
2283 * it's a bit tricky, so let's be verbose:
2285 * 1) Find out the size of the operand. If the instruction
2286 * doesn't specify one, we try to guess it, either from the
2287 * template (IF_S* flags) or
2288 * from the code bits.
2290 * 2) If the template operand does not match the instruction, OR
2291 * the template specifies an operand size AND that size differs
2292 * from the one the instruction has (perhaps derived from the
2293 * code bits), then:
2294 * a) check that only the sizes of the instruction and the
2295 * operand differ and all other characteristics match;
2296 * b) if the mismatched operand is a register, just mark
2297 * that operand as "size missing"; this turns on the
2298 * fuzzy operand-size logic
2299 * in the caller (find_match() above).
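/*
 * Illustrative example (an assumption, not part of the original comment):
 * for "vaddps ymm0,ymm1,[rax]" matched against a ymmreg,ymmreg,ymmrm256
 * template, the memory operand carries no size, so MERR_OPSIZEMISSING is
 * returned below and find_match() ORs BITS256 into the operand type
 * before retrying the match.
 */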
2301 for (i = 0; i < itemp->operands; i++) {
2302 opflags_t type = instruction->oprs[i].type;
2303 decoflags_t deco = instruction->oprs[i].decoflags;
2304 decoflags_t ideco = itemp->deco[i];
2305 bool is_broadcast = deco & BRDCAST_MASK;
2306 uint8_t brcast_num = 0;
2307 opflags_t template_opsize, insn_opsize;
2309 if (!(type & SIZE_MASK))
2310 type |= size[i];
2312 insn_opsize = type & SIZE_MASK;
2313 if (!is_broadcast) {
2314 template_opsize = itemp->opd[i] & SIZE_MASK;
2315 } else {
2316 decoflags_t deco_brsize = ideco & BRSIZE_MASK;
2318 if (~ideco & BRDCAST_MASK)
2319 return MERR_BRNOTHERE;
2322 * When broadcasting, the element size depends on
2323 * the instruction type; the decorator flag must match it.
2325 if (deco_brsize) {
2326 template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
2327 /* calculate the proper count: {1to<brcast_num>} */
2328 brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
2329 } else {
2330 template_opsize = 0;
2334 if (~ideco & deco & OPMASK_MASK)
2335 return MERR_MASKNOTHERE;
2337 if (~ideco & deco & (Z_MASK|STATICRND_MASK|SAE_MASK))
2338 return MERR_DECONOTHERE;
2340 if (itemp->opd[i] & ~type & ~SIZE_MASK) {
2341 return MERR_INVALOP;
2342 } else if (template_opsize) {
2343 if (template_opsize != insn_opsize) {
2344 if (insn_opsize) {
2345 return MERR_INVALOP;
2346 } else if (!is_class(REGISTER, type)) {
2348 * Note: we don't honor extrinsic operand sizes for registers,
2349 * so "missing operand size" for a register should be
2350 * considered a wildcard match rather than an error.
2352 opsizemissing = true;
2354 } else if (is_broadcast &&
2355 (brcast_num !=
2356 (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
2358 * The broadcast element size matches, but the number of repeated
2359 * memory elements does not.
2360 * E.g. if a 64-bit double-precision float is broadcast to ymm (256 bits),
2361 * the broadcasting decorator must be {1to4}.
2363 return MERR_BRNUMMISMATCH;
2368 if (opsizemissing)
2369 return MERR_OPSIZEMISSING;
2372 * Check operand sizes
2374 if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
2375 oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
2376 for (i = 0; i < oprs; i++) {
2377 asize = itemp->opd[i] & SIZE_MASK;
2378 if (asize) {
2379 for (i = 0; i < oprs; i++)
2380 size[i] = asize;
2381 break;
2384 } else {
2385 oprs = itemp->operands;
2388 for (i = 0; i < itemp->operands; i++) {
2389 if (!(itemp->opd[i] & SIZE_MASK) &&
2390 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
2391 return MERR_OPSIZEMISMATCH;
2395 * Check template is okay at the set cpu level
2397 if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
2398 return MERR_BADCPU;
2401 * Verify the appropriate long mode flag.
2403 if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
2404 return MERR_BADMODE;
2407 * If we have a HLE prefix, look for the NOHLE flag
2409 if (itemp_has(itemp, IF_NOHLE) &&
2410 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2411 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2412 return MERR_BADHLE;
2415 * Check if special handling needed for Jumps
2417 if ((itemp->code[0] & ~1) == 0370)
2418 return MOK_JUMP;
2421 * Check if BND prefix is allowed.
2422 * Any other 0xF2 prefix (REPNE/REPNZ) is prohibited.
2424 if (!itemp_has(itemp, IF_BND) &&
2425 (has_prefix(instruction, PPS_REP, P_BND) ||
2426 has_prefix(instruction, PPS_REP, P_NOBND)))
2427 return MERR_BADBND;
2428 else if (itemp_has(itemp, IF_BND) &&
2429 (has_prefix(instruction, PPS_REP, P_REPNE) ||
2430 has_prefix(instruction, PPS_REP, P_REPNZ)))
2431 return MERR_BADREPNE;
2433 return MOK_GOOD;
2437 * Check if ModR/M.mod should/can be 01.
2438 * - EAF_BYTEOFFS is set
2439 * - offset can fit in a byte when EVEX is not used
2440 * - offset can be compressed when EVEX is used
2442 #define IS_MOD_01() (!(input->eaflags & EAF_WORDOFFS) && \
2443 (ins->rex & REX_EV ? seg == NO_SEG && !forw_ref && \
2444 is_disp8n(input, ins, &output->disp8) : \
2445 input->eaflags & EAF_BYTEOFFS || (o >= -128 && \
2446 o <= 127 && seg == NO_SEG && !forw_ref)))
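/*
 * Illustration (an assumption, not from the original source): without
 * EVEX, [rax+0x40] qualifies for mod=01 simply because 0x40 fits in a
 * signed byte; with EVEX and a 64-byte memory operand, is_disp8n()
 * instead checks that the offset is a multiple of the element size, so
 * 0x40 is emitted as the compressed 8-bit displacement 0x40/64 = 1.
 */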
2448 static enum ea_type process_ea(operand *input, ea *output, int bits,
2449 int rfield, opflags_t rflags, insn *ins,
2450 const char **errmsg)
2452 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
2453 int addrbits = ins->addr_size;
2454 int eaflags = input->eaflags;
2456 *errmsg = "invalid effective address"; /* Default error message */
2458 output->type = EA_SCALAR;
2459 output->rip = false;
2460 output->disp8 = 0;
2462 /* REX flags for the rfield operand */
2463 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
2464 /* EVEX.R' flag for the REG operand */
2465 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
2467 if (is_class(REGISTER, input->type)) {
2469 * It's a direct register.
2471 if (!is_register(input->basereg))
2472 goto err;
2474 if (!is_reg_class(REG_EA, input->basereg))
2475 goto err;
2477 /* broadcasting is not available with a direct register operand. */
2478 if (input->decoflags & BRDCAST_MASK) {
2479 *errmsg = "broadcast not allowed with register operand";
2480 goto err;
2483 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
2484 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
2485 output->sib_present = false; /* no SIB necessary */
2486 output->bytes = 0; /* no offset necessary either */
2487 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
2488 } else {
2490 * It's a memory reference.
2493 /* Embedded rounding or SAE is not available with a mem ref operand. */
2494 if (input->decoflags & (ER | SAE)) {
2495 *errmsg = "embedded rounding is available only with "
2496 "register-register operations";
2497 goto err;
2500 if (input->basereg == -1 &&
2501 (input->indexreg == -1 || input->scale == 0)) {
2503 * It's a pure offset.
2505 if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
2506 if (input->segment == NO_SEG ||
2507 (input->opflags & OPFLAG_RELATIVE)) {
2508 nasm_error(ERR_WARNING | ERR_PASS2,
2509 "absolute address can not be RIP-relative");
2510 input->type &= ~IP_REL;
2511 input->type |= MEMORY;
2515 if (bits == 64 &&
2516 !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
2517 *errmsg = "RIP-relative addressing is prohibited for MIB";
2518 goto err;
2521 if (eaflags & EAF_BYTEOFFS ||
2522 (eaflags & EAF_WORDOFFS &&
2523 input->disp_size != (addrbits != 16 ? 32 : 16))) {
2524 nasm_error(ERR_WARNING | ERR_PASS1,
2525 "displacement size ignored on absolute address");
2528 if (bits == 64 && (~input->type & IP_REL)) {
2529 output->sib_present = true;
2530 output->sib = GEN_SIB(0, 4, 5);
2531 output->bytes = 4;
2532 output->modrm = GEN_MODRM(0, rfield, 4);
2533 output->rip = false;
2534 } else {
2535 output->sib_present = false;
2536 output->bytes = (addrbits != 16 ? 4 : 2);
2537 output->modrm = GEN_MODRM(0, rfield,
2538 (addrbits != 16 ? 5 : 6));
2539 output->rip = bits == 64;
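/*
 * Illustration (not from the original source): in 64-bit mode a plain
 * absolute [0x1234] therefore encodes as mod=00, rm=100 plus the SIB
 * byte 0x25 (scale=0, index=none, base=disp32) and a 4-byte
 * displacement, while the RIP-relative form above uses mod=00, rm=101
 * with a 4-byte displacement and no SIB.
 */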
2541 } else {
2543 * It's an indirection.
2545 int i = input->indexreg, b = input->basereg, s = input->scale;
2546 int32_t seg = input->segment;
2547 int hb = input->hintbase, ht = input->hinttype;
2548 int t, it, bt; /* register numbers */
2549 opflags_t x, ix, bx; /* register flags */
2551 if (s == 0)
2552 i = -1; /* make this easy, at least */
2554 if (is_register(i)) {
2555 it = nasm_regvals[i];
2556 ix = nasm_reg_flags[i];
2557 } else {
2558 it = -1;
2559 ix = 0;
2562 if (is_register(b)) {
2563 bt = nasm_regvals[b];
2564 bx = nasm_reg_flags[b];
2565 } else {
2566 bt = -1;
2567 bx = 0;
2570 /* if either one is a vector register... */
2571 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
2572 opflags_t sok = BITS32 | BITS64;
2573 int32_t o = input->offset;
2574 int mod, scale, index, base;
2577 * For a vector SIB, one has to be a vector and the other,
2578 * if present, a GPR. The vector must be the index operand.
2580 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
2581 if (s == 0)
2582 s = 1;
2583 else if (s != 1)
2584 goto err;
2586 t = bt, bt = it, it = t;
2587 x = bx, bx = ix, ix = x;
2590 if (bt != -1) {
2591 if (REG_GPR & ~bx)
2592 goto err;
2593 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2594 sok &= bx;
2595 else
2596 goto err;
2600 * While we're here, ensure the user didn't specify
2601 * WORD or QWORD
2603 if (input->disp_size == 16 || input->disp_size == 64)
2604 goto err;
2606 if (addrbits == 16 ||
2607 (addrbits == 32 && !(sok & BITS32)) ||
2608 (addrbits == 64 && !(sok & BITS64)))
2609 goto err;
2611 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2612 : ((ix & YMMREG & ~REG_EA)
2613 ? EA_YMMVSIB : EA_XMMVSIB));
2615 output->rex |= rexflags(it, ix, REX_X);
2616 output->rex |= rexflags(bt, bx, REX_B);
2617 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
2619 index = it & 7; /* it is known to be != -1 */
2621 switch (s) {
2622 case 1:
2623 scale = 0;
2624 break;
2625 case 2:
2626 scale = 1;
2627 break;
2628 case 4:
2629 scale = 2;
2630 break;
2631 case 8:
2632 scale = 3;
2633 break;
2634 default: /* then what the smeg is it? */
2635 goto err; /* panic */
2638 if (bt == -1) {
2639 base = 5;
2640 mod = 0;
2641 } else {
2642 base = (bt & 7);
2643 if (base != REG_NUM_EBP && o == 0 &&
2644 seg == NO_SEG && !forw_ref &&
2645 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2646 mod = 0;
2647 else if (IS_MOD_01())
2648 mod = 1;
2649 else
2650 mod = 2;
2653 output->sib_present = true;
2654 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2655 output->modrm = GEN_MODRM(mod, rfield, 4);
2656 output->sib = GEN_SIB(scale, index, base);
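/*
 * Illustration (an assumption): for a gather such as
 *     vgatherdps xmm0{k1},[rax+xmm1*4]
 * xmm1 must be the index and rax the base, so output->type is
 * EA_XMMVSIB and the SIB scale/index/base fields set above come from
 * xmm1*4 and rax respectively.
 */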
2657 } else if ((ix|bx) & (BITS32|BITS64)) {
2659 * it must be a 32/64-bit memory reference. First we have
2660 * to check that all registers involved are of type E/Rxx.
2662 opflags_t sok = BITS32 | BITS64;
2663 int32_t o = input->offset;
2665 if (it != -1) {
2666 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2667 sok &= ix;
2668 else
2669 goto err;
2672 if (bt != -1) {
2673 if (REG_GPR & ~bx)
2674 goto err; /* Invalid register */
2675 if (~sok & bx & SIZE_MASK)
2676 goto err; /* Invalid size */
2677 sok &= bx;
2681 * While we're here, ensure the user didn't specify
2682 * WORD or QWORD
2684 if (input->disp_size == 16 || input->disp_size == 64)
2685 goto err;
2687 if (addrbits == 16 ||
2688 (addrbits == 32 && !(sok & BITS32)) ||
2689 (addrbits == 64 && !(sok & BITS64)))
2690 goto err;
2692 /* now reorganize base/index */
2693 if (s == 1 && bt != it && bt != -1 && it != -1 &&
2694 ((hb == b && ht == EAH_NOTBASE) ||
2695 (hb == i && ht == EAH_MAKEBASE))) {
2696 /* swap if hints say so */
2697 t = bt, bt = it, it = t;
2698 x = bx, bx = ix, ix = x;
2701 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
2702 /* make single reg base, unless hint */
2703 bt = it, bx = ix, it = -1, ix = 0;
2705 if (eaflags & EAF_MIB) {
2706 /* only for mib operands */
2707 if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
2709 * make a single reg index [reg*1].
2710 * gas uses this form for an explicit index register.
2712 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2714 if ((ht == EAH_SUMMED) && bt == -1) {
2715 /* separate once summed index into [base, index] */
2716 bt = it, bx = ix, s--;
2718 } else {
2719 if (((s == 2 && it != REG_NUM_ESP &&
2720 (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
2721 s == 3 || s == 5 || s == 9) && bt == -1) {
2722 /* convert 3*EAX to EAX+2*EAX */
2723 bt = it, bx = ix, s--;
2725 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
2726 (eaflags & EAF_TIMESTWO) &&
2727 (hb == b && ht == EAH_NOTBASE)) {
2729 * convert [NOSPLIT EAX*1]
2730 * to SIB format with a zero displacement, i.e. [EAX*1+0].
2732 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2735 if (s == 1 && it == REG_NUM_ESP) {
2736 /* swap ESP into base if scale is 1 */
2737 t = it, it = bt, bt = t;
2738 x = ix, ix = bx, bx = x;
2740 if (it == REG_NUM_ESP ||
2741 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
2742 goto err; /* wrong, for various reasons */
2744 output->rex |= rexflags(it, ix, REX_X);
2745 output->rex |= rexflags(bt, bx, REX_B);
2747 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
2748 /* no SIB needed */
2749 int mod, rm;
2751 if (bt == -1) {
2752 rm = 5;
2753 mod = 0;
2754 } else {
2755 rm = (bt & 7);
2756 if (rm != REG_NUM_EBP && o == 0 &&
2757 seg == NO_SEG && !forw_ref &&
2758 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2759 mod = 0;
2760 else if (IS_MOD_01())
2761 mod = 1;
2762 else
2763 mod = 2;
2766 output->sib_present = false;
2767 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2768 output->modrm = GEN_MODRM(mod, rfield, rm);
2769 } else {
2770 /* we need a SIB */
2771 int mod, scale, index, base;
2773 if (it == -1)
2774 index = 4, s = 1;
2775 else
2776 index = (it & 7);
2778 switch (s) {
2779 case 1:
2780 scale = 0;
2781 break;
2782 case 2:
2783 scale = 1;
2784 break;
2785 case 4:
2786 scale = 2;
2787 break;
2788 case 8:
2789 scale = 3;
2790 break;
2791 default: /* then what the smeg is it? */
2792 goto err; /* panic */
2795 if (bt == -1) {
2796 base = 5;
2797 mod = 0;
2798 } else {
2799 base = (bt & 7);
2800 if (base != REG_NUM_EBP && o == 0 &&
2801 seg == NO_SEG && !forw_ref &&
2802 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2803 mod = 0;
2804 else if (IS_MOD_01())
2805 mod = 1;
2806 else
2807 mod = 2;
2810 output->sib_present = true;
2811 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2812 output->modrm = GEN_MODRM(mod, rfield, 4);
2813 output->sib = GEN_SIB(scale, index, base);
2815 } else { /* it's 16-bit */
2816 int mod, rm;
2817 int16_t o = input->offset;
2819 /* check for 64-bit long mode */
2820 if (addrbits == 64)
2821 goto err;
2823 /* check all registers are BX, BP, SI or DI */
2824 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2825 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
2826 goto err;
2828 /* ensure the user didn't specify DWORD/QWORD */
2829 if (input->disp_size == 32 || input->disp_size == 64)
2830 goto err;
2832 if (s != 1 && i != -1)
2833 goto err; /* no can do, in 16-bit EA */
2834 if (b == -1 && i != -1) {
2835 int tmp = b;
2836 b = i;
2837 i = tmp;
2838 } /* swap */
2839 if ((b == R_SI || b == R_DI) && i != -1) {
2840 int tmp = b;
2841 b = i;
2842 i = tmp;
2844 /* have BX/BP as base, SI/DI index */
2845 if (b == i)
2846 goto err; /* shouldn't ever happen, in theory */
2847 if (i != -1 && b != -1 &&
2848 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
2849 goto err; /* invalid combinations */
2850 if (b == -1) /* pure offset: handled above */
2851 goto err; /* so if it gets to here, panic! */
2853 rm = -1;
2854 if (i != -1)
2855 switch (i * 256 + b) {
2856 case R_SI * 256 + R_BX:
2857 rm = 0;
2858 break;
2859 case R_DI * 256 + R_BX:
2860 rm = 1;
2861 break;
2862 case R_SI * 256 + R_BP:
2863 rm = 2;
2864 break;
2865 case R_DI * 256 + R_BP:
2866 rm = 3;
2867 break;
2868 } else
2869 switch (b) {
2870 case R_SI:
2871 rm = 4;
2872 break;
2873 case R_DI:
2874 rm = 5;
2875 break;
2876 case R_BP:
2877 rm = 6;
2878 break;
2879 case R_BX:
2880 rm = 7;
2881 break;
2883 if (rm == -1) /* can't happen, in theory */
2884 goto err; /* so panic if it does */
2886 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
2887 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2888 mod = 0;
2889 else if (IS_MOD_01())
2890 mod = 1;
2891 else
2892 mod = 2;
2894 output->sib_present = false; /* no SIB - it's 16-bit */
2895 output->bytes = mod; /* bytes of offset needed */
2896 output->modrm = GEN_MODRM(mod, rfield, rm);
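/*
 * Illustration (not from the original source): [bx+si] selects rm=0
 * and, with no displacement, mod=0; [bp+8] selects rm=6 but is forced
 * to mod=1 with an 8-bit displacement, because mod=0 with rm=6 means
 * a bare 16-bit offset instead.
 */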
2901 output->size = 1 + output->sib_present + output->bytes;
2902 return output->type;
2904 err:
2905 return output->type = EA_INVALID;
2908 static void add_asp(insn *ins, int addrbits)
2910 int j, valid;
2911 int defdisp;
2913 valid = (addrbits == 64) ? 64|32 : 32|16;
2915 switch (ins->prefixes[PPS_ASIZE]) {
2916 case P_A16:
2917 valid &= 16;
2918 break;
2919 case P_A32:
2920 valid &= 32;
2921 break;
2922 case P_A64:
2923 valid &= 64;
2924 break;
2925 case P_ASP:
2926 valid &= (addrbits == 32) ? 16 : 32;
2927 break;
2928 default:
2929 break;
2932 for (j = 0; j < ins->operands; j++) {
2933 if (is_class(MEMORY, ins->oprs[j].type)) {
2934 opflags_t i, b;
2936 /* Verify the index is a register */
2937 if (!is_register(ins->oprs[j].indexreg))
2938 i = 0;
2939 else
2940 i = nasm_reg_flags[ins->oprs[j].indexreg];
2942 /* Verify the base is a register */
2943 if (!is_register(ins->oprs[j].basereg))
2944 b = 0;
2945 else
2946 b = nasm_reg_flags[ins->oprs[j].basereg];
2948 if (ins->oprs[j].scale == 0)
2949 i = 0;
2951 if (!i && !b) {
2952 int ds = ins->oprs[j].disp_size;
2953 if ((addrbits != 64 && ds > 8) ||
2954 (addrbits == 64 && ds == 16))
2955 valid &= ds;
2956 } else {
2957 if (!(REG16 & ~b))
2958 valid &= 16;
2959 if (!(REG32 & ~b))
2960 valid &= 32;
2961 if (!(REG64 & ~b))
2962 valid &= 64;
2964 if (!(REG16 & ~i))
2965 valid &= 16;
2966 if (!(REG32 & ~i))
2967 valid &= 32;
2968 if (!(REG64 & ~i))
2969 valid &= 64;
2974 if (valid & addrbits) {
2975 ins->addr_size = addrbits;
2976 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2977 /* Add an address size prefix */
2978 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
2979 ins->addr_size = (addrbits == 32) ? 16 : 32;
2980 } else {
2981 /* Impossible... */
2982 nasm_error(ERR_NONFATAL, "impossible combination of address sizes");
2983 ins->addr_size = addrbits; /* Error recovery */
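/*
 * Illustration (an assumption, not from the original source): in
 * 64-bit mode a memory operand such as [eax] leaves only 32 in
 * "valid", so P_A32 is added here and the instruction later gets a
 * 0x67 address-size override; [rax] keeps the native 64-bit address
 * size and needs no prefix.
 */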
2986 defdisp = ins->addr_size == 16 ? 16 : 32;
2988 for (j = 0; j < ins->operands; j++) {
2989 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2990 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2992 * mem_offs sizes must match the address size; if not,
2993 * strip the MEM_OFFS bit and match only EA instructions
2995 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);