rbtree: add rb_search_exact()
[nasm.git] / asm / assemble.c
blobe5d5682cfe78501e2e0dc233ee51d67599ed4495
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2019 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * Bytecode specification
38 * ----------------------
41 * Codes Mnemonic Explanation
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
98 * [p0] pp = 0 for no prefix
 * 99  *               [66]         pp = 1 for legacy prefix 66
100 * [f3] pp = 2
101 * [f2] pp = 3
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
119 * [w0] ww = 0 for W = 0
120 * [w1 ] ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
148 disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes an YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes an ZMM VSIB memory EA
178 #include "compiler.h"
181 #include "nasm.h"
182 #include "nasmlib.h"
183 #include "error.h"
184 #include "assemble.h"
185 #include "insns.h"
186 #include "tables.h"
187 #include "disp8.h"
188 #include "listing.h"
/*
 * Result of matching an instruction against one template.
 *
 * The MERR_* codes describe why a match failed; they are deliberately
 * ordered from least to most specific, so the numerically largest code
 * collected over all candidate templates yields the most useful
 * diagnostic.  The MOK_* codes indicate success.
 */
enum match_result {
    /* Matching errors — more specific errors have higher values */
    MERR_INVALOP,
    MERR_OPSIZEMISSING,
    MERR_OPSIZEMISMATCH,
    MERR_BRNOTHERE,
    MERR_BRNUMMISMATCH,
    MERR_MASKNOTHERE,
    MERR_DECONOTHERE,
    MERR_BADCPU,
    MERR_BADMODE,
    MERR_BADHLE,
    MERR_ENCMISMATCH,
    MERR_BADBND,
    MERR_BADREPNE,
    MERR_REGSETSIZE,
    MERR_REGSET,

    /* Matching success; the conditional one first */
    MOK_JUMP,                   /* Matching OK but needs jmp_match() */
    MOK_GOOD                    /* Matching unconditionally OK */
};
217 typedef struct {
218 enum ea_type type; /* what kind of EA is this? */
219 int sib_present; /* is a SIB byte necessary? */
220 int bytes; /* # of bytes of offset needed */
221 int size; /* lazy - this is sib+bytes+1 */
222 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
223 int8_t disp8; /* compressed displacement for EVEX */
224 } ea;
/*
 * Build a SIB byte: scale in bits 7..6, index register in bits 5..3,
 * base register in bits 2..0.  scale/index/base are assumed to be
 * already reduced to their field widths.
 */
#define GEN_SIB(scale, index, base) \
    (((scale) << 6) | ((index) << 3) | ((base)))

/*
 * Build a ModRM byte: mod in bits 7..6, reg in bits 5..3, r/m in
 * bits 2..0.  reg and rm are masked down to their low three bits.
 */
#define GEN_MODRM(mod, reg, rm) \
    (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
232 static int64_t calcsize(int32_t, int64_t, int, insn *,
233 const struct itemplate *);
234 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
235 static void gencode(struct out_data *data, insn *ins);
236 static enum match_result find_match(const struct itemplate **tempp,
237 insn *instruction,
238 int32_t segment, int64_t offset, int bits);
239 static enum match_result matches(const struct itemplate *, insn *, int bits);
240 static opflags_t regflag(const operand *);
241 static int32_t regval(const operand *);
242 static int rexflags(int, opflags_t, int);
243 static int op_rexflags(const operand *, int);
244 static int op_evexflags(const operand *, int, uint8_t);
245 static void add_asp(insn *, int);
247 static enum ea_type process_ea(operand *, ea *, int, int,
248 opflags_t, insn *, const char **);
250 static inline bool absolute_op(const struct operand *o)
252 return o->segment == NO_SEG && o->wrt == NO_SEG &&
253 !(o->opflags & OPFLAG_RELATIVE);
256 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
258 return ins->prefixes[pos] == prefix;
261 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
263 if (ins->prefixes[pos])
264 nasm_nonfatal("invalid %s prefix", prefix_name(ins->prefixes[pos]));
/*
 * Human-readable name for an operand size given in bytes.
 * Returns "???" for any size without a conventional name.
 */
static const char *size_name(int size)
{
    static const struct {
        int bytes;
        const char *name;
    } names[] = {
        {  1, "byte"  },
        {  2, "word"  },
        {  4, "dword" },
        {  8, "qword" },
        { 10, "tword" },
        { 16, "oword" },
        { 32, "yword" },
        { 64, "zword" }
    };
    size_t i;

    for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        if (names[i].bytes == size)
            return names[i].name;
    }

    return "???";
}
291 static void warn_overflow(int size)
293 nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW, "%s data exceeds bounds",
294 size_name(size));
/* Warn if the constant `data` cannot be represented in `size` bytes. */
static void warn_overflow_const(int64_t data, int size)
{
    if (overflow_general(data, size))
        warn_overflow(size);
}
303 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
305 bool err;
307 switch (sign) {
308 case OUT_WRAP:
309 err = overflow_general(data, size);
310 break;
311 case OUT_SIGNED:
312 err = overflow_signed(data, size);
313 break;
314 case OUT_UNSIGNED:
315 err = overflow_unsigned(data, size);
316 break;
317 default:
318 panic();
319 break;
322 if (err)
323 warn_overflow(size);
327 * This routine wrappers the real output format's output routine,
328 * in order to pass a copy of the data off to the listing file
329 * generator at the same time, flatten unnecessary relocations,
330 * and verify backend compatibility.
333 * This warning is currently issued by backends, but in the future
334 * this code should be centralized.
336 *!zeroing [on] RESx in initialized section becomes zero
337 *! a \c{RESx} directive was used in a section which contains
338 *! initialized data, and the output format does not support
339 *! this. Instead, this will be replaced with explicit zero
340 *! content, which may produce a large output file.
342 static void out(struct out_data *data)
344 static int32_t lineno = 0; /* static!!! */
345 static const char *lnfname = NULL;
346 union {
347 uint8_t b[8];
348 uint64_t q;
349 } xdata;
350 size_t asize, amax;
351 uint64_t zeropad = 0;
352 int64_t addrval;
353 int32_t fixseg; /* Segment for which to produce fixed data */
355 if (!data->size)
356 return; /* Nothing to do */
359 * Convert addresses to RAWDATA if possible
360 * XXX: not all backends want this for global symbols!!!!
362 switch (data->type) {
363 case OUT_ADDRESS:
364 addrval = data->toffset;
365 fixseg = NO_SEG; /* Absolute address is fixed data */
366 goto address;
368 case OUT_RELADDR:
369 addrval = data->toffset - data->relbase;
370 fixseg = data->segment; /* Our own segment is fixed data */
371 goto address;
373 address:
374 nasm_assert(data->size <= 8);
375 asize = data->size;
376 amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
377 if ((ofmt->flags & OFMT_KEEP_ADDR) == 0 && data->tsegment == fixseg &&
378 data->twrt == NO_SEG) {
379 if (asize >= (size_t)(data->bits >> 3))
380 data->sign = OUT_WRAP; /* Support address space wrapping for low-bit modes */
381 warn_overflow_out(addrval, asize, data->sign);
382 xdata.q = cpu_to_le64(addrval);
383 data->data = xdata.b;
384 data->type = OUT_RAWDATA;
385 asize = amax = 0; /* No longer an address */
387 break;
389 case OUT_SEGMENT:
390 nasm_assert(data->size <= 8);
391 asize = data->size;
392 amax = 2;
393 break;
395 default:
396 asize = amax = 0; /* Not an address */
397 break;
401 * this call to src_get determines when we call the
402 * debug-format-specific "linenum" function
403 * it updates lineno and lnfname to the current values
404 * returning 0 if "same as last time", -2 if lnfname
405 * changed, and the amount by which lineno changed,
406 * if it did. thus, these variables must be static
409 if (src_get(&lineno, &lnfname))
410 dfmt->linenum(lnfname, lineno, data->segment);
412 if (asize > amax) {
413 if (data->type == OUT_RELADDR || data->sign == OUT_SIGNED) {
414 nasm_nonfatal("%u-bit signed relocation unsupported by output format %s",
415 (unsigned int)(asize << 3), ofmt->shortname);
416 } else {
418 *!zext-reloc [on] relocation zero-extended to match output format
419 *! warns that a relocation has been zero-extended due
420 *! to limitations in the output format.
422 nasm_warn(WARN_ZEXT_RELOC,
423 "%u-bit %s relocation zero-extended from %u bits",
424 (unsigned int)(asize << 3),
425 data->type == OUT_SEGMENT ? "segment" : "unsigned",
426 (unsigned int)(amax << 3));
428 zeropad = data->size - amax;
429 data->size = amax;
431 lfmt->output(data);
433 if (likely(data->segment != NO_SEG)) {
434 ofmt->output(data);
435 } else {
436 /* Outputting to ABSOLUTE section - only reserve is permitted */
437 if (data->type != OUT_RESERVE)
438 nasm_nonfatal("attempt to assemble code in [ABSOLUTE] space");
439 /* No need to push to the backend */
442 data->offset += data->size;
443 data->insoffs += data->size;
445 if (zeropad) {
446 data->type = OUT_ZERODATA;
447 data->size = zeropad;
448 lfmt->output(data);
449 ofmt->output(data);
450 data->offset += zeropad;
451 data->insoffs += zeropad;
452 data->size += zeropad; /* Restore original size value */
456 static inline void out_rawdata(struct out_data *data, const void *rawdata,
457 size_t size)
459 data->type = OUT_RAWDATA;
460 data->data = rawdata;
461 data->size = size;
462 out(data);
465 static void out_rawbyte(struct out_data *data, uint8_t byte)
467 data->type = OUT_RAWDATA;
468 data->data = &byte;
469 data->size = 1;
470 out(data);
473 static inline void out_reserve(struct out_data *data, uint64_t size)
475 data->type = OUT_RESERVE;
476 data->size = size;
477 out(data);
480 static void out_segment(struct out_data *data, const struct operand *opx)
482 if (opx->opflags & OPFLAG_RELATIVE)
483 nasm_nonfatal("segment references cannot be relative");
485 data->type = OUT_SEGMENT;
486 data->sign = OUT_UNSIGNED;
487 data->size = 2;
488 data->toffset = opx->offset;
489 data->tsegment = ofmt->segbase(opx->segment | 1);
490 data->twrt = opx->wrt;
491 out(data);
494 static void out_imm(struct out_data *data, const struct operand *opx,
495 int size, enum out_sign sign)
497 if (opx->segment != NO_SEG && (opx->segment & 1)) {
499 * This is actually a segment reference, but eval() has
500 * already called ofmt->segbase() for us. Sigh.
502 if (size < 2)
503 nasm_nonfatal("segment reference must be 16 bits");
505 data->type = OUT_SEGMENT;
506 } else {
507 data->type = (opx->opflags & OPFLAG_RELATIVE)
508 ? OUT_RELADDR : OUT_ADDRESS;
510 data->sign = sign;
511 data->toffset = opx->offset;
512 data->tsegment = opx->segment;
513 data->twrt = opx->wrt;
515 * XXX: improve this if at some point in the future we can
516 * distinguish the subtrahend in expressions like [foo - bar]
517 * where bar is a symbol in the current segment. However, at the
518 * current point, if OPFLAG_RELATIVE is set that subtraction has
519 * already occurred.
521 data->relbase = 0;
522 data->size = size;
523 out(data);
526 static void out_reladdr(struct out_data *data, const struct operand *opx,
527 int size)
529 if (opx->opflags & OPFLAG_RELATIVE)
530 nasm_nonfatal("invalid use of self-relative expression");
532 data->type = OUT_RELADDR;
533 data->sign = OUT_SIGNED;
534 data->size = size;
535 data->toffset = opx->offset;
536 data->tsegment = opx->segment;
537 data->twrt = opx->wrt;
538 data->relbase = data->offset + (data->inslen - data->insoffs);
539 out(data);
542 static bool jmp_match(int32_t segment, int64_t offset, int bits,
543 insn * ins, const struct itemplate *temp)
545 int64_t isize;
546 const uint8_t *code = temp->code;
547 uint8_t c = code[0];
548 bool is_byte;
550 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
551 return false;
552 if (!optimizing.level || (optimizing.flag & OPTIM_DISABLE_JMP_MATCH))
553 return false;
554 if (optimizing.level < 0 && c == 0371)
555 return false;
557 isize = calcsize(segment, offset, bits, ins, temp);
559 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
560 /* Be optimistic in pass 1 */
561 return true;
563 if (ins->oprs[0].segment != segment)
564 return false;
566 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
567 is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */
569 if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
570 /* jmp short (opcode eb) cannot be used with bnd prefix. */
571 ins->prefixes[PPS_REP] = P_none;
573 *!bnd [on] invalid BND prefixes
574 *! warns about ineffective use of the \c{BND} prefix when the
575 *! \c{JMP} instruction is converted to the \c{SHORT} form.
576 *! This should be extremely rare since the short \c{JMP} only
577 *! is applicable to jumps inside the same module, but if
578 *! it is legitimate, it may be necessary to use
579 *! \c{bnd jmp dword}.
581 nasm_warn(WARN_BND | ERR_PASS2 ,
582 "jmp short does not init bnd regs - bnd prefix dropped");
585 return is_byte;
588 static inline int64_t merge_resb(insn *ins, int64_t isize)
590 int nbytes = resb_bytes(ins->opcode);
592 if (likely(!nbytes))
593 return isize;
595 if (isize != nbytes * ins->oprs[0].offset)
596 return isize; /* Has prefixes of some sort */
598 ins->oprs[0].offset *= ins->times;
599 isize *= ins->times;
600 ins->times = 1;
601 return isize;
/*
 * Number of padding bytes needed to round `len` up to a multiple of
 * `align`.  This must handle non-power-of-2 alignment values; an
 * alignment of 0 or 1 never requires padding (the explicit guard also
 * avoids division by zero).
 */
static inline size_t pad_bytes(size_t len, size_t align)
{
    size_t partial;

    if (align <= 1)
        return 0;

    partial = len % align;
    return partial ? align - partial : 0;
}
611 static void out_eops(struct out_data *data, const extop *e)
613 while (e) {
614 size_t dup = e->dup;
616 switch (e->type) {
617 case EOT_NOTHING:
618 break;
620 case EOT_EXTOP:
621 while (dup--)
622 out_eops(data, e->val.subexpr);
623 break;
625 case EOT_DB_NUMBER:
626 if (e->elem > 8) {
627 nasm_nonfatal("integer supplied as %d-bit data",
628 e->elem << 3);
629 } else {
630 while (dup--) {
631 data->insoffs = 0;
632 data->inslen = data->size = e->elem;
633 data->tsegment = e->val.num.segment;
634 data->toffset = e->val.num.offset;
635 data->twrt = e->val.num.wrt;
636 data->relbase = 0;
637 if (e->val.num.segment != NO_SEG &&
638 (e->val.num.segment & 1)) {
639 data->type = OUT_SEGMENT;
640 data->sign = OUT_UNSIGNED;
641 } else {
642 data->type = e->val.num.relative
643 ? OUT_RELADDR : OUT_ADDRESS;
644 data->sign = OUT_WRAP;
646 out(data);
649 break;
651 case EOT_DB_FLOAT:
652 case EOT_DB_STRING:
653 case EOT_DB_STRING_FREE:
655 size_t pad, len;
657 pad = pad_bytes(e->val.string.len, e->elem);
658 len = e->val.string.len + pad;
660 while (dup--) {
661 data->insoffs = 0;
662 data->inslen = len;
663 out_rawdata(data, e->val.string.data, e->val.string.len);
664 if (pad)
665 out_rawdata(data, zero_buffer, pad);
667 break;
670 case EOT_DB_RESERVE:
671 data->insoffs = 0;
672 data->inslen = dup * e->elem;
673 out_reserve(data, data->inslen);
674 break;
677 e = e->next;
681 /* This is totally just a wild guess what is reasonable... */
682 #define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
684 int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
686 struct out_data data;
687 const struct itemplate *temp;
688 enum match_result m;
690 if (instruction->opcode == I_none)
691 return 0;
693 nasm_zero(data);
694 data.offset = start;
695 data.segment = segment;
696 data.itemp = NULL;
697 data.bits = bits;
699 if (opcode_is_db(instruction->opcode)) {
700 out_eops(&data, instruction->eops);
701 } else if (instruction->opcode == I_INCBIN) {
702 const char *fname = instruction->eops->val.string.data;
703 FILE *fp;
704 size_t t = instruction->times; /* INCBIN handles TIMES by itself */
705 off_t base = 0;
706 off_t len;
707 const void *map = NULL;
708 char *buf = NULL;
709 size_t blk = 0; /* Buffered I/O block size */
710 size_t m = 0; /* Bytes last read */
712 if (!t)
713 goto done;
715 fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
716 if (!fp) {
717 nasm_nonfatal("`incbin': unable to open file `%s'",
718 fname);
719 goto done;
722 len = nasm_file_size(fp);
724 if (len == (off_t)-1) {
725 nasm_nonfatal("`incbin': unable to get length of file `%s'",
726 fname);
727 goto close_done;
730 if (instruction->eops->next) {
731 base = instruction->eops->next->val.num.offset;
732 if (base >= len) {
733 len = 0;
734 } else {
735 len -= base;
736 if (instruction->eops->next->next &&
737 len > (off_t)instruction->eops->next->next->val.num.offset)
738 len = (off_t)instruction->eops->next->next->val.num.offset;
742 lfmt->set_offset(data.offset);
743 lfmt->uplevel(LIST_INCBIN, len);
745 if (!len)
746 goto end_incbin;
748 /* Try to map file data */
749 map = nasm_map_file(fp, base, len);
750 if (!map) {
751 blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
752 buf = nasm_malloc(blk);
755 while (t--) {
757 * Consider these irrelevant for INCBIN, since it is fully
758 * possible that these might be (way) bigger than an int
759 * can hold; there is, however, no reason to widen these
760 * types just for INCBIN. data.inslen == 0 signals to the
761 * backend that these fields are meaningless, if at all
762 * needed.
764 data.insoffs = 0;
765 data.inslen = 0;
767 if (map) {
768 out_rawdata(&data, map, len);
769 } else if ((off_t)m == len) {
770 out_rawdata(&data, buf, len);
771 } else {
772 off_t l = len;
774 if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
775 nasm_nonfatal("`incbin': unable to seek on file `%s'",
776 fname);
777 goto end_incbin;
779 while (l > 0) {
780 m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
781 if (!m || feof(fp)) {
783 * This shouldn't happen unless the file
784 * actually changes while we are reading
785 * it.
787 nasm_nonfatal("`incbin': unexpected EOF while"
788 " reading file `%s'", fname);
789 goto end_incbin;
791 out_rawdata(&data, buf, m);
792 l -= m;
796 end_incbin:
797 lfmt->downlevel(LIST_INCBIN);
798 if (instruction->times > 1) {
799 lfmt->uplevel(LIST_TIMES, instruction->times);
800 lfmt->downlevel(LIST_TIMES);
802 if (ferror(fp)) {
803 nasm_nonfatal("`incbin': error while"
804 " reading file `%s'", fname);
806 close_done:
807 if (buf)
808 nasm_free(buf);
809 if (map)
810 nasm_unmap_file(map, len);
811 fclose(fp);
812 done:
813 instruction->times = 1; /* Tell the upper layer not to iterate */
815 } else {
816 /* "Real" instruction */
818 /* Check to see if we need an address-size prefix */
819 add_asp(instruction, bits);
821 m = find_match(&temp, instruction, data.segment, data.offset, bits);
823 if (m == MOK_GOOD) {
824 /* Matches! */
825 if (unlikely(itemp_has(temp, IF_OBSOLETE))) {
826 errflags warning;
827 const char *whathappened;
828 const char *validity;
829 bool never = itemp_has(temp, IF_NEVER);
832 * If IF_OBSOLETE is set, warn the user. Different
833 * warning classes for "obsolete but valid for this
834 * specific CPU" and "obsolete and gone."
836 *!obsolete-removed [on] instruction obsolete and removed on the target CPU
837 *! warns for an instruction which has been removed
838 *! from the architecture, and is no longer included
839 *! in the CPU definition given in the \c{[CPU]}
840 *! directive, for example \c{POP CS}, the opcode for
841 *! which, \c{0Fh}, instead is an opcode prefix on
842 *! CPUs newer than the first generation 8086.
844 *!obsolete-nop [on] instruction obsolete and is a noop on the target CPU
845 *! warns for an instruction which has been removed
846 *! from the architecture, but has been architecturally
847 *! defined to be a noop for future CPUs.
849 *!obsolete-valid [on] instruction obsolete but valid on the target CPU
850 *! warns for an instruction which has been removed
851 *! from the architecture, but is still valid on the
852 *! specific CPU given in the \c{CPU} directive. Code
853 *! using these instructions is most likely not
854 *! forward compatible.
857 whathappened = never ? "never implemented" : "obsolete";
859 if (!never && !iflag_cmp_cpu_level(&insns_flags[temp->iflag_idx], &cpu)) {
860 warning = WARN_OBSOLETE_VALID;
861 validity = "but valid on";
862 } else if (itemp_has(temp, IF_NOP)) {
863 warning = WARN_OBSOLETE_NOP;
864 validity = "and is a noop on";
865 } else {
866 warning = WARN_OBSOLETE_REMOVED;
867 validity = never ? "and invalid on" : "and removed from";
870 nasm_warn(warning, "instruction %s %s the target CPU",
871 whathappened, validity);
874 data.itemp = temp;
875 data.bits = bits;
876 data.insoffs = 0;
878 data.inslen = calcsize(data.segment, data.offset,
879 bits, instruction, temp);
880 nasm_assert(data.inslen >= 0);
881 data.inslen = merge_resb(instruction, data.inslen);
883 gencode(&data, instruction);
884 nasm_assert(data.insoffs == data.inslen);
885 } else {
886 /* No match */
887 switch (m) {
888 case MERR_OPSIZEMISSING:
889 nasm_nonfatal("operation size not specified");
890 break;
891 case MERR_OPSIZEMISMATCH:
892 nasm_nonfatal("mismatch in operand sizes");
893 break;
894 case MERR_BRNOTHERE:
895 nasm_nonfatal("broadcast not permitted on this operand");
896 break;
897 case MERR_BRNUMMISMATCH:
898 nasm_nonfatal("mismatch in the number of broadcasting elements");
899 break;
900 case MERR_MASKNOTHERE:
901 nasm_nonfatal("mask not permitted on this operand");
902 break;
903 case MERR_DECONOTHERE:
904 nasm_nonfatal("unsupported mode decorator for instruction");
905 break;
906 case MERR_BADCPU:
907 nasm_nonfatal("no instruction for this cpu level");
908 break;
909 case MERR_BADMODE:
910 nasm_nonfatal("instruction not supported in %d-bit mode", bits);
911 break;
912 case MERR_ENCMISMATCH:
913 nasm_nonfatal("specific encoding scheme not available");
914 break;
915 case MERR_BADBND:
916 nasm_nonfatal("bnd prefix is not allowed");
917 break;
918 case MERR_BADREPNE:
919 nasm_nonfatal("%s prefix is not allowed",
920 (has_prefix(instruction, PPS_REP, P_REPNE) ?
921 "repne" : "repnz"));
922 break;
923 case MERR_REGSETSIZE:
924 nasm_nonfatal("invalid register set size");
925 break;
926 case MERR_REGSET:
927 nasm_nonfatal("register set not valid for operand");
928 break;
929 default:
930 nasm_nonfatal("invalid combination of opcode and operands");
931 break;
934 instruction->times = 1; /* Avoid repeated error messages */
937 return data.offset - start;
940 static int32_t eops_typeinfo(const extop *e)
942 int32_t typeinfo = 0;
944 while (e) {
945 switch (e->type) {
946 case EOT_NOTHING:
947 break;
949 case EOT_EXTOP:
950 typeinfo |= eops_typeinfo(e->val.subexpr);
951 break;
953 case EOT_DB_FLOAT:
954 switch (e->elem) {
955 case 1: typeinfo |= TY_BYTE; break;
956 case 2: typeinfo |= TY_WORD; break;
957 case 4: typeinfo |= TY_FLOAT; break;
958 case 8: typeinfo |= TY_QWORD; break; /* double? */
959 case 10: typeinfo |= TY_TBYTE; break; /* long double? */
960 case 16: typeinfo |= TY_YWORD; break;
961 case 32: typeinfo |= TY_ZWORD; break;
962 default: break;
964 break;
966 default:
967 switch (e->elem) {
968 case 1: typeinfo |= TY_BYTE; break;
969 case 2: typeinfo |= TY_WORD; break;
970 case 4: typeinfo |= TY_DWORD; break;
971 case 8: typeinfo |= TY_QWORD; break;
972 case 10: typeinfo |= TY_TBYTE; break;
973 case 16: typeinfo |= TY_YWORD; break;
974 case 32: typeinfo |= TY_ZWORD; break;
975 default: break;
977 break;
979 e = e->next;
982 return typeinfo;
985 static inline void debug_set_db_type(insn *instruction)
988 int32_t typeinfo = TYS_ELEMENTS(instruction->operands);
990 typeinfo |= eops_typeinfo(instruction->eops);
991 dfmt->debug_typevalue(typeinfo);
994 static void debug_set_type(insn *instruction)
996 int32_t typeinfo;
998 if (opcode_is_resb(instruction->opcode)) {
999 typeinfo = TYS_ELEMENTS(instruction->oprs[0].offset);
1001 switch (instruction->opcode) {
1002 case I_RESB:
1003 typeinfo |= TY_BYTE;
1004 break;
1005 case I_RESW:
1006 typeinfo |= TY_WORD;
1007 break;
1008 case I_RESD:
1009 typeinfo |= TY_DWORD;
1010 break;
1011 case I_RESQ:
1012 typeinfo |= TY_QWORD;
1013 break;
1014 case I_REST:
1015 typeinfo |= TY_TBYTE;
1016 break;
1017 case I_RESO:
1018 typeinfo |= TY_OWORD;
1019 break;
1020 case I_RESY:
1021 typeinfo |= TY_YWORD;
1022 break;
1023 case I_RESZ:
1024 typeinfo |= TY_ZWORD;
1025 break;
1026 default:
1027 panic();
1029 } else {
1030 typeinfo = TY_LABEL;
1033 dfmt->debug_typevalue(typeinfo);
/* Process an EQU directive */
static void define_equ(insn * instruction)
{
    if (!instruction->label) {
        nasm_nonfatal("EQU not preceded by label");
    } else if (instruction->operands == 1 &&
               (instruction->oprs[0].type & IMMEDIATE) &&
               instruction->oprs[0].wrt == NO_SEG) {
        /* Plain "label EQU expr": define at the expression's seg:offset */
        define_label(instruction->label,
                     instruction->oprs[0].segment,
                     instruction->oprs[0].offset, false);
    } else if (instruction->operands == 2
               && (instruction->oprs[0].type & IMMEDIATE)
               && (instruction->oprs[0].type & COLON)
               && instruction->oprs[0].segment == NO_SEG
               && instruction->oprs[0].wrt == NO_SEG
               && (instruction->oprs[1].type & IMMEDIATE)
               && instruction->oprs[1].segment == NO_SEG
               && instruction->oprs[1].wrt == NO_SEG) {
        /* "label EQU seg:off": first operand is an absolute segment value */
        define_label(instruction->label,
                     instruction->oprs[0].offset | SEG_ABS,
                     instruction->oprs[1].offset, false);
    } else {
        nasm_nonfatal("bad syntax for EQU");
    }
}
/*
 * Compute the total number of bytes produced by an extended operand
 * (Dx data) list, taking `dup` repetition counts, nested sub-lists
 * and per-element padding into account.
 */
static int64_t len_extops(const extop *e)
{
    int64_t isize = 0;
    size_t pad;

    while (e) {
        switch (e->type) {
        case EOT_NOTHING:
            break;

        case EOT_EXTOP:
            /* Nested list: each repetition contributes its full length */
            isize += e->dup * len_extops(e->val.subexpr);
            break;

        case EOT_DB_STRING:
        case EOT_DB_STRING_FREE:
        case EOT_DB_FLOAT:
            /* String/float payloads are padded out to the element size */
            pad = pad_bytes(e->val.string.len, e->elem);
            isize += e->dup * (e->val.string.len + pad);
            break;

        case EOT_DB_NUMBER:
            /* Diagnose constants that don't fit in one element */
            warn_overflow_const(e->val.num.offset, e->elem);
            isize += e->dup * e->elem;
            break;

        case EOT_DB_RESERVE:
            isize += e->dup;
            break;
        }

        e = e->next;
    }

    return isize;
}
/*
 * Compute the size in bytes that an instruction will occupy, handling
 * the pseudo-instructions (EQU, Dx data, INCBIN) specially and
 * deferring real instructions to find_match()/calcsize().
 * Returns -1 if no instruction template matches.
 */
int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
{
    const struct itemplate *temp;
    enum match_result m;
    int64_t isize = 0;

    if (instruction->opcode == I_none) {
        return 0;
    } else if (instruction->opcode == I_EQU) {
        /* EQU emits nothing; it only defines a label */
        define_equ(instruction);
        return 0;
    } else if (opcode_is_db(instruction->opcode)) {
        isize = len_extops(instruction->eops);
        debug_set_db_type(instruction);
        return isize;
    } else if (instruction->opcode == I_INCBIN) {
        /* Operands: filename [, skip-offset [, max-length]] */
        const extop *e = instruction->eops;
        const char *fname = e->val.string.data;
        off_t len;

        len = nasm_file_size_by_path(fname);
        if (len == (off_t)-1) {
            nasm_nonfatal("`incbin': unable to get length of file `%s'",
                          fname);
            return 0;
        }

        e = e->next;
        if (e) {
            /* Second operand: bytes to skip at the start of the file */
            if (len <= (off_t)e->val.num.offset) {
                len = 0;
            } else {
                len -= e->val.num.offset;
                e = e->next;
                /* Third operand: cap on the number of bytes included */
                if (e && len > (off_t)e->val.num.offset) {
                    len = (off_t)e->val.num.offset;
                }
            }
        }

        len *= instruction->times;
        instruction->times = 1; /* Tell the upper layer to not iterate */

        return len;
    } else {
        /* Normal instruction, or RESx */

        /* Check to see if we need an address-size prefix */
        add_asp(instruction, bits);

        m = find_match(&temp, instruction, segment, offset, bits);
        if (m != MOK_GOOD)
            return -1;              /* No match */

        isize = calcsize(segment, offset, bits, instruction, temp);
        debug_set_type(instruction);
        isize = merge_resb(instruction, isize);

        return isize;
    }
}
/*
 * Warn about invalid use of the HLE XACQUIRE/XRELEASE prefixes.
 * `hleok` is a 2-bit mask built by calcsize() from the template
 * bytecode (bit 0: XRELEASE acceptable, bit 1: XACQUIRE acceptable);
 * it indexes the inner dimension of the warn[] table below.
 */
static void bad_hle_warn(const insn * ins, uint8_t hleok)
{
    enum prefixes rep_pfx = ins->prefixes[PPS_REP];
    enum whatwarn { w_none, w_lock, w_inval } ww;
    static const enum whatwarn warn[2][4] =
    {
        { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
        { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
    };
    unsigned int n;

    /* Relies on unsigned wraparound: anything below P_XACQUIRE is huge */
    n = (unsigned int)rep_pfx - P_XACQUIRE;
    if (n > 1)
        return;                 /* Not XACQUIRE/XRELEASE */

    ww = warn[n][hleok];
    if (!is_class(MEMORY, ins->oprs[0].type))
        ww = w_inval;           /* HLE requires operand 0 to be memory */

    /*!
     *!hle [on] invalid HLE prefixes
     *!  warns about invalid use of the HLE \c{XACQUIRE} or \c{XRELEASE}
     *!  prefixes.
     */
    switch (ww) {
    case w_none:
        break;

    case w_lock:
        if (ins->prefixes[PPS_LOCK] != P_LOCK) {
            nasm_warn(WARN_HLE | ERR_PASS2,
                      "%s with this instruction requires lock",
                      prefix_name(rep_pfx));
        }
        break;

    case w_inval:
        nasm_warn(WARN_HLE | ERR_PASS2,
                  "%s invalid with this instruction",
                  prefix_name(rep_pfx));
        break;
    }
}
/*
 * Common construct: expand to three/four consecutive case labels
 * starting at (x), for bytecodes whose low bits select an operand.
 */
#define case3(x) case (x): case (x)+1: case (x)+2
#define case4(x) case3(x): case (x)+3
/*
 * Calculate the encoded size of an instruction by interpreting the
 * template bytecode in temp->code, without emitting anything.  As a
 * side effect it accumulates prefix/REX/VEX/EVEX state in *ins, which
 * gencode() depends on afterwards.  Returns the length in bytes, or
 * -1 if the instruction cannot be encoded in the current mode.
 */
static int64_t calcsize(int32_t segment, int64_t offset, int bits,
                        insn * ins, const struct itemplate *temp)
{
    const uint8_t *codes = temp->code;
    int64_t length = 0;
    uint8_t c;
    int rex_mask = ~0;
    int op1, op2;
    struct operand *opx;
    uint8_t opex = 0;
    enum ea_type eat;
    uint8_t hleok = 0;
    bool lockcheck = true;
    enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
    const char *errmsg;

    ins->rex = 0;               /* Ensure REX is reset */
    eat = EA_SCALAR;            /* Expect a scalar EA */
    memset(ins->evex_p, 0, 3);  /* Ensure EVEX is reset */

    if (ins->prefixes[PPS_OSIZE] == P_O64)
        ins->rex |= REX_W;

    (void)segment;              /* Don't warn that this parameter is unused */
    (void)offset;               /* Don't warn that this parameter is unused */

    /* Interpret the bytecode; op1/op2 select which operands a code acts on */
    while (*codes) {
        c = *codes++;
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0;               /* For the next iteration */

        switch (c) {
        case4(01):
            /* c literal bytes follow in the code stream */
            codes += c, length += c;
            break;

        case3(05):
            /* Operand-index extension for the next bytecode */
            opex = c;
            break;

        case4(010):
            ins->rex |=
                op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
            codes++, length++;
            break;

        case4(014):
            /* this is an index reg of MIB operand */
            mib_index = opx->basereg;
            break;

        case4(020):
        case4(024):
            length++;
            break;

        case4(030):
            length += 2;
            break;

        case4(034):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(040):
            length += 4;
            break;

        case4(044):
            length += ins->addr_size >> 3;
            break;

        case4(050):
            length++;
            break;

        case4(054):
            length += 8; /* MOV reg64/imm */
            break;

        case4(060):
            length += 2;
            break;

        case4(064):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(070):
            length += 4;
            break;

        case4(074):
            length += 2;
            break;

        case 0172:
        case 0173:
            codes++;
            length++;
            break;

        case4(0174):
            length++;
            break;

        case4(0240):
            /* EVEX-encoded with NDS register operand */
            ins->rex |= REX_EV;
            ins->vexreg = regval(opx);
            ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case 0250:
            /* EVEX-encoded, no NDS register */
            ins->rex |= REX_EV;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case4(0254):
            length += 4;
            break;

        case4(0260):
            /* VEX-encoded with NDS register operand */
            ins->rex |= REX_V;
            ins->vexreg = regval(opx);
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case 0270:
            /* VEX-encoded, no NDS register */
            ins->rex |= REX_V;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case3(0271):
            /* Record which HLE prefixes are acceptable; see bad_hle_warn() */
            hleok = c & 3;
            break;

        case4(0274):
            length++;
            break;

        case4(0300):
            break;

        case 0310:
            if (bits == 64)
                return -1;
            length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
            break;

        case 0311:
            length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
            break;

        case 0312:
            break;

        case 0313:
            if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
                has_prefix(ins, PPS_ASIZE, P_A32))
                return -1;
            break;

        case4(0314):
            break;

        case 0320:
        {
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O16)
                break;
            if (pfx != P_none)
                nasm_warn(WARN_OTHER|ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O16;
            break;
        }

        case 0321:
        {
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O32)
                break;
            if (pfx != P_none)
                nasm_warn(WARN_OTHER|ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O32;
            break;
        }

        case 0322:
            break;

        case 0323:
            rex_mask &= ~REX_W;
            break;

        case 0324:
            ins->rex |= REX_W;
            break;

        case 0325:
            ins->rex |= REX_NH;
            break;

        case 0326:
            break;

        case 0330:
            codes++, length++;
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            length++;
            break;

        case 0334:
            ins->rex |= REX_L;
            break;

        case 0335:
            break;

        case 0336:
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REP;
            break;

        case 0337:
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REPNE;
            break;

        case 0340:
            if (!absolute_op(&ins->oprs[0]))
                nasm_nonfatal("attempt to reserve non-constant"
                              " quantity of BSS space");
            else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
                nasm_warn(WARN_OTHER, "forward reference in RESx "
                          "can have unpredictable results");
            else
                length += ins->oprs[0].offset * resb_bytes(ins->opcode);
            break;

        case 0341:
            if (!ins->prefixes[PPS_WAIT])
                ins->prefixes[PPS_WAIT] = P_WAIT;
            break;

        case 0360:
            break;

        case 0361:
            length++;
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            length++;
            break;

        case 0370:
        case 0371:
            break;

        case 0373:
            length++;
            break;

        case 0374:
            eat = EA_XMMVSIB;
            break;

        case 0375:
            eat = EA_YMMVSIB;
            break;

        case 0376:
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* ModR/M (and possibly SIB) effective-address bytecodes */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            struct operand *opy = &ins->oprs[op2];
            struct operand *op_er_sae;

            ea_data.rex = 0;           /* Ensure ea.REX is initially 0 */

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                rflags = 0;
                rfield = c & 7;
            }

            /* EVEX.b1 : evex_brerop contains the operand position */
            op_er_sae = (ins->evex_brerop >= 0 ?
                         &ins->oprs[ins->evex_brerop] : NULL);

            if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
                /* set EVEX.b */
                ins->evex_p[2] |= EVEX_P2B;
                if (op_er_sae->decoflags & ER) {
                    /* set EVEX.RC (rounding control) */
                    ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
                                      & EVEX_P2RC;
                }
            } else {
                /* set EVEX.L'L (vector length) */
                ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
                ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
                if (opy->decoflags & BRDCAST_MASK) {
                    /* set EVEX.b */
                    ins->evex_p[2] |= EVEX_P2B;
                }
            }

            if (itemp_has(temp, IF_MIB)) {
                opy->eaflags |= EAF_MIB;
                /*
                 * if a separate form of MIB (ICC style) is used,
                 * the index reg info is merged into mem operand
                 */
                if (mib_index != R_none) {
                    opy->indexreg = mib_index;
                    opy->scale = 1;
                    opy->hintbase = mib_index;
                    opy->hinttype = EAH_NOTBASE;
                }
            }

            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins, &errmsg) != eat) {
                nasm_nonfatal("%s", errmsg);
                return -1;
            } else {
                ins->rex |= ea_data.rex;
                length += ea_data.size;
            }
            break;
        }

        default:
            nasm_panic("internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }

    ins->rex &= rex_mask;

    if (ins->rex & REX_NH) {
        if (ins->rex & REX_H) {
            nasm_nonfatal("instruction cannot use high registers");
            return -1;
        }
        ins->rex &= ~REX_P;     /* Don't force REX prefix due to high reg */
    }

    /* Verify a requested {vex}/{evex} encoding is actually available */
    switch (ins->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!(ins->rex & REX_EV))
            return -1;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!(ins->rex & REX_V))
            return -1;
        break;
    default:
        break;
    }

    if (ins->rex & (REX_V | REX_EV)) {
        int bad32 = REX_R|REX_W|REX_X|REX_B;

        if (ins->rex & REX_H) {
            nasm_nonfatal("cannot use high register in AVX instruction");
            return -1;
        }
        switch (ins->vex_wlp & 060) {
        case 000:
        case 040:
            ins->rex &= ~REX_W;
            break;
        case 020:
            ins->rex |= REX_W;
            bad32 &= ~REX_W;
            break;
        case 060:
            /* Follow REX_W */
            break;
        }

        if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
            nasm_nonfatal("invalid operands in non-64-bit mode");
            return -1;
        } else if (!(ins->rex & REX_EV) &&
                   ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
            nasm_nonfatal("invalid high-16 register in non-AVX-512");
            return -1;
        }
        /* EVEX prefix is 4 bytes; VEX is 3 or 2 depending on encoding */
        if (ins->rex & REX_EV)
            length += 4;
        else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                 ins->prefixes[PPS_VEX] == P_VEX3)
            length += 3;
        else
            length += 2;
    } else if (ins->rex & REX_MASK) {
        if (ins->rex & REX_H) {
            nasm_nonfatal("cannot use high register in rex instruction");
            return -1;
        } else if (bits == 64) {
            length++;
        } else if ((ins->rex & REX_L) &&
                   !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
                   iflag_cpu_level_ok(&cpu, IF_X86_64)) {
            /* LOCK-as-REX.R */
            assert_no_prefix(ins, PPS_LOCK);
            lockcheck = false;  /* Already errored, no need for warning */
            length++;
        } else {
            nasm_nonfatal("invalid operands in non-64-bit mode");
            return -1;
        }
    }

    if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
        (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
        /*!
         *!lock [on] LOCK prefix on unlockable instructions
         *!  warns about \c{LOCK} prefixes on unlockable instructions.
         */
        nasm_warn(WARN_LOCK | ERR_PASS2 , "instruction is not lockable");
    }

    bad_hle_warn(ins, hleok);

    /*
     * when BND prefix is set by DEFAULT directive,
     * BND prefix is added to every appropriate instruction line
     * unless it is overridden by NOBND prefix.
     */
    if (globalbnd &&
        (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
        ins->prefixes[PPS_REP] = P_BND;

    /*
     * Add length of legacy prefixes
     */
    length += emit_prefix(NULL, bits, ins);

    return length;
}
1707 static inline void emit_rex(struct out_data *data, insn *ins)
1709 if (data->bits == 64) {
1710 if ((ins->rex & REX_MASK) &&
1711 !(ins->rex & (REX_V | REX_EV)) &&
1712 !ins->rex_done) {
1713 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1714 out_rawbyte(data, rex);
1715 ins->rex_done = true;
/*
 * Emit (when data != NULL) or merely count (data == NULL, used by
 * calcsize()) the legacy one-byte prefixes recorded in ins->prefixes.
 * Returns the number of prefix bytes.
 */
static int emit_prefix(struct out_data *data, const int bits, insn *ins)
{
    int bytes = 0;
    int j;

    for (j = 0; j < MAXPREFIX; j++) {
        uint8_t c = 0;
        switch (ins->prefixes[j]) {
        case P_WAIT:
            c = 0x9B;
            break;
        case P_LOCK:
            c = 0xF0;
            break;
        case P_REPNE:
        case P_REPNZ:
        case P_XACQUIRE:
        case P_BND:
            c = 0xF2;
            break;
        case P_REPE:
        case P_REPZ:
        case P_REP:
        case P_XRELEASE:
            c = 0xF3;
            break;
        case R_CS:
            if (bits == 64)
                nasm_warn(WARN_OTHER|ERR_PASS2, "cs segment base generated, "
                          "but will be ignored in 64-bit mode");
            c = 0x2E;
            break;
        case R_DS:
            if (bits == 64)
                nasm_warn(WARN_OTHER|ERR_PASS2, "ds segment base generated, "
                          "but will be ignored in 64-bit mode");
            c = 0x3E;
            break;
        case R_ES:
            if (bits == 64)
                nasm_warn(WARN_OTHER|ERR_PASS2, "es segment base generated, "
                          "but will be ignored in 64-bit mode");
            c = 0x26;
            break;
        case R_FS:
            /* FS/GS remain meaningful in 64-bit mode; no warning */
            c = 0x64;
            break;
        case R_GS:
            c = 0x65;
            break;
        case R_SS:
            if (bits == 64) {
                nasm_warn(WARN_OTHER|ERR_PASS2, "ss segment base generated, "
                          "but will be ignored in 64-bit mode");
            }
            c = 0x36;
            break;
        case R_SEGR6:
        case R_SEGR7:
            nasm_nonfatal("segr6 and segr7 cannot be used as prefixes");
            break;
        case P_A16:
            if (bits == 64) {
                nasm_nonfatal("16-bit addressing is not supported "
                              "in 64-bit mode");
            } else if (bits != 16)
                c = 0x67;
            break;
        case P_A32:
            if (bits != 32)
                c = 0x67;
            break;
        case P_A64:
            if (bits != 64) {
                nasm_nonfatal("64-bit addressing is only supported "
                              "in 64-bit mode");
            }
            break;
        case P_ASP:
            c = 0x67;
            break;
        case P_O16:
            if (bits != 16)
                c = 0x66;
            break;
        case P_O32:
            if (bits == 16)
                c = 0x66;
            break;
        case P_O64:
            /* REX.W */
            break;
        case P_OSP:
            c = 0x66;
            break;
        case P_EVEX:
        case P_VEX3:
        case P_VEX2:
        case P_NOBND:
        case P_none:
            break;
        default:
            nasm_panic("invalid instruction prefix");
        }
        if (c) {
            if (data)
                out_rawbyte(data, c);
            bytes++;
        }
    }
    return bytes;
}
/*
 * Emit the binary encoding of an instruction, walking the same
 * template bytecode that calcsize() interpreted; relies on the
 * REX/VEX/EVEX state calcsize() left in *ins.
 */
static void gencode(struct out_data *data, insn *ins)
{
    uint8_t c;
    uint8_t bytes[4];
    int64_t size;
    int op1, op2;
    struct operand *opx;
    const uint8_t *codes = data->itemp->code;
    uint8_t opex = 0;
    enum ea_type eat = EA_SCALAR;
    int r;
    const int bits = data->bits;
    const char *errmsg;

    ins->rex_done = false;

    emit_prefix(data, bits, ins);

    while (*codes) {
        c = *codes++;
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0;               /* For the next iteration */

        switch (c) {
        case 01:
        case 02:
        case 03:
        case 04:
            /* Literal opcode bytes from the code stream */
            emit_rex(data, ins);
            out_rawdata(data, codes, c);
            codes += c;
            break;

        case 05:
        case 06:
        case 07:
            /* Operand-index extension for the next bytecode */
            opex = c;
            break;

        case4(010):
            /* Opcode byte with register number in the low 3 bits */
            emit_rex(data, ins);
            out_rawbyte(data, *codes++ + (regval(opx) & 7));
            break;

        case4(014):
            break;

        case4(020):
            out_imm(data, opx, 1, OUT_WRAP);
            break;

        case4(024):
            out_imm(data, opx, 1, OUT_UNSIGNED);
            break;

        case4(030):
            out_imm(data, opx, 2, OUT_WRAP);
            break;

        case4(034):
            if (opx->type & (BITS16 | BITS32))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(040):
            out_imm(data, opx, 4, OUT_WRAP);
            break;

        case4(044):
            size = ins->addr_size >> 3;
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(050):
            /* 8-bit relative jump: range-check when target is resolvable */
            if (opx->segment == data->segment) {
                int64_t delta = opx->offset - data->offset
                    - (data->inslen - data->insoffs);
                if (delta > 127 || delta < -128)
                    nasm_nonfatal("short jump is out of range");
            }
            out_reladdr(data, opx, 1);
            break;

        case4(054):
            out_imm(data, opx, 8, OUT_WRAP);
            break;

        case4(060):
            out_reladdr(data, opx, 2);
            break;

        case4(064):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;

            out_reladdr(data, opx, size);
            break;

        case4(070):
            out_reladdr(data, opx, 4);
            break;

        case4(074):
            if (opx->segment == NO_SEG)
                nasm_nonfatal("value referenced by FAR is not relocatable");
            out_segment(data, opx);
            break;

        case 0172:
        {
            /* is4 byte: register in high nibble, immediate argument low */
            int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
            const struct operand *opy;

            c = *codes++;
            opx = &ins->oprs[c >> 3];
            opy = &ins->oprs[c & 7];
            if (!absolute_op(opy))
                nasm_nonfatal("non-absolute expression not permitted "
                              "as argument %d", c & 7);
            else if (opy->offset & ~mask)
                nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
                          "is4 argument exceeds bounds");
            c = opy->offset & mask;
            goto emit_is4;
        }

        case 0173:
            c = *codes++;
            opx = &ins->oprs[c >> 4];
            c &= 15;
            goto emit_is4;

        case4(0174):
            c = 0;
        emit_is4:
            r = nasm_regvals[opx->basereg];
            out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
            break;

        case4(0254):
            if (absolute_op(opx) &&
                (int32_t)opx->offset != (int64_t)opx->offset) {
                nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
                          "signed dword immediate exceeds bounds");
            }
            out_imm(data, opx, 4, OUT_SIGNED);
            break;

        case4(0240):
        case 0250:
            /* Emit the 4-byte EVEX prefix built up by calcsize() */
            codes += 3;
            ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
                                           EVEX_P2Z | EVEX_P2AAA, 2);
            ins->evex_p[2] ^= EVEX_P2VP;        /* 1's complement */
            bytes[0] = 0x62;
            /* EVEX.X can be set by either REX or EVEX for different reasons */
            bytes[1] = ((((ins->rex & 7) << 5) |
                         (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
                       (ins->vex_cm & EVEX_P0MM);
            bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
                       ((~ins->vexreg & 15) << 3) |
                       (1 << 2) | (ins->vex_wlp & 3);
            bytes[3] = ins->evex_p[2];
            out_rawdata(data, bytes, 4);
            break;

        case4(0260):
        case 0270:
            /* Emit a 3-byte or 2-byte VEX prefix as appropriate */
            codes += 2;
            if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                ins->prefixes[PPS_VEX] == P_VEX3) {
                bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
                bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
                bytes[2] = ((ins->rex & REX_W) << (7-3)) |
                    ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 3);
            } else {
                bytes[0] = 0xc5;
                bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
                    ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 2);
            }
            break;

        case 0271:
        case 0272:
        case 0273:
            break;

        case4(0274):
        {
            /* Sign-extended byte immediate; warn on out-of-range values */
            uint64_t uv, um;
            int s;

            if (absolute_op(opx)) {
                if (ins->rex & REX_W)
                    s = 64;
                else if (ins->prefixes[PPS_OSIZE] == P_O16)
                    s = 16;
                else if (ins->prefixes[PPS_OSIZE] == P_O32)
                    s = 32;
                else
                    s = bits;

                um = (uint64_t)2 << (s-1);
                uv = opx->offset;

                if (uv > 127 && uv < (uint64_t)-128 &&
                    (uv < um-128 || uv > um-1)) {
                    /* If this wasn't explicitly byte-sized, warn as though we
                     * had fallen through to the imm16/32/64 case.
                     */
                    nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
                              "%s value exceeds bounds",
                              (opx->type & BITS8) ? "signed byte" :
                              s == 16 ? "word" :
                              s == 32 ? "dword" :
                              "signed dword");
                }

                /* Output as a raw byte to avoid byte overflow check */
                out_rawbyte(data, (uint8_t)uv);
            } else {
                out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
            }
            break;
        }

        case4(0300):
            break;

        case 0310:
            if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
                out_rawbyte(data, 0x67);
            break;

        case 0311:
            if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
                out_rawbyte(data, 0x67);
            break;

        case 0312:
            break;

        case 0313:
            break;

        case4(0314):
            break;

        case 0320:
        case 0321:
            break;

        case 0322:
        case 0323:
            break;

        case 0324:
            ins->rex |= REX_W;
            break;

        case 0325:
            break;

        case 0326:
            break;

        case 0330:
            /* Conditional opcode: merge condition code into base byte */
            out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            out_rawbyte(data, c - 0332 + 0xF2);
            break;

        case 0334:
            if (ins->rex & REX_R)
                out_rawbyte(data, 0xF0);
            ins->rex &= ~(REX_L|REX_R);
            break;

        case 0335:
            break;

        case 0336:
        case 0337:
            break;

        case 0340:
            if (ins->oprs[0].segment != NO_SEG)
                nasm_panic("non-constant BSS size in pass two");

            out_reserve(data, ins->oprs[0].offset * resb_bytes(ins->opcode));
            break;

        case 0341:
            break;

        case 0360:
            break;

        case 0361:
            out_rawbyte(data, 0x66);
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            out_rawbyte(data, c - 0366 + 0x66);
            break;

        case3(0370):
            break;

        case 0373:
            out_rawbyte(data, bits == 16 ? 3 : 5);
            break;

        case 0374:
            eat = EA_XMMVSIB;
            break;

        case 0375:
            eat = EA_YMMVSIB;
            break;

        case 0376:
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* ModR/M (and possibly SIB + displacement) emission */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            uint8_t *p;
            struct operand *opy = &ins->oprs[op2];

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                /* rfield is constant */
                rflags = 0;
                rfield = c & 7;
            }

            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins, &errmsg) != eat)
                nasm_nonfatal("%s", errmsg);

            p = bytes;
            *p++ = ea_data.modrm;
            if (ea_data.sib_present)
                *p++ = ea_data.sib;
            out_rawdata(data, bytes, p - bytes);

            /*
             * Make sure the address gets the right offset in case
             * the line breaks in the .lst file (BR 1197827)
             */

            if (ea_data.bytes) {
                /* use compressed displacement, if available */
                if (ea_data.disp8) {
                    out_rawbyte(data, ea_data.disp8);
                } else if (ea_data.rip) {
                    out_reladdr(data, opy, ea_data.bytes);
                } else {
                    int asize = ins->addr_size >> 3;

                    if (overflow_general(opy->offset, asize) ||
                        signed_bits(opy->offset, ins->addr_size) !=
                        signed_bits(opy->offset, ea_data.bytes << 3))
                        warn_overflow(ea_data.bytes);

                    out_imm(data, opy, ea_data.bytes,
                            (asize > ea_data.bytes)
                            ? OUT_SIGNED : OUT_WRAP);
                }
            }
            break;
        }

        default:
            nasm_panic("internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }
}
2253 static opflags_t regflag(const operand * o)
2255 if (!is_register(o->basereg))
2256 nasm_panic("invalid operand passed to regflag()");
2257 return nasm_reg_flags[o->basereg];
2260 static int32_t regval(const operand * o)
2262 if (!is_register(o->basereg))
2263 nasm_panic("invalid operand passed to regval()");
2264 return nasm_regvals[o->basereg];
2267 static int op_rexflags(const operand * o, int mask)
2269 opflags_t flags;
2270 int val;
2272 if (!is_register(o->basereg))
2273 nasm_panic("invalid operand passed to op_rexflags()");
2275 flags = nasm_reg_flags[o->basereg];
2276 val = nasm_regvals[o->basereg];
2278 return rexflags(val, flags, mask);
2281 static int rexflags(int val, opflags_t flags, int mask)
2283 int rex = 0;
2285 if (val >= 0 && (val & 8))
2286 rex |= REX_B|REX_X|REX_R;
2287 if (flags & BITS64)
2288 rex |= REX_W;
2289 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
2290 rex |= REX_H;
2291 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
2292 rex |= REX_P;
2294 return rex & mask;
2297 static int evexflags(int val, decoflags_t deco,
2298 int mask, uint8_t byte)
2300 int evex = 0;
2302 switch (byte) {
2303 case 0:
2304 if (val >= 0 && (val & 16))
2305 evex |= (EVEX_P0RP | EVEX_P0X);
2306 break;
2307 case 2:
2308 if (val >= 0 && (val & 16))
2309 evex |= EVEX_P2VP;
2310 if (deco & Z)
2311 evex |= EVEX_P2Z;
2312 if (deco & OPMASK_MASK)
2313 evex |= deco & EVEX_P2AAA;
2314 break;
2316 return evex & mask;
2319 static int op_evexflags(const operand * o, int mask, uint8_t byte)
2321 int val;
2323 val = nasm_regvals[o->basereg];
2325 return evexflags(val, o->decoflags, mask, byte);
/*
 * Find the best-matching template for an instruction.  First tries an
 * exact match over all templates for the opcode; if the only failures
 * were missing operand sizes, infers the sizes from the candidate
 * templates (fuzzy matching) and tries once more.  On return *tempp
 * points at the selected (or last examined) template.
 */
static enum match_result find_match(const struct itemplate **tempp,
                                    insn *instruction,
                                    int32_t segment, int64_t offset, int bits)
{
    const struct itemplate *temp;
    enum match_result m, merr;
    opflags_t xsizeflags[MAX_OPERANDS];
    bool opsizemissing = false;
    int8_t broadcast = instruction->evex_brerop;
    int i;

    /* broadcasting uses a different data element size */
    for (i = 0; i < instruction->operands; i++)
        if (i == broadcast)
            xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
        else
            xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;

    merr = MERR_INVALOP;

    /* First pass: exact matching, collecting candidate sizes as we go */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
            /*
             * Missing operand size and a candidate for fuzzy matching...
             */
            for (i = 0; i < temp->operands; i++)
                if (i == broadcast)
                    xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
                else
                    xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
            opsizemissing = true;
        }
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

    /* No match, but see if we can get a fuzzy operand size match... */
    if (!opsizemissing)
        goto done;

    for (i = 0; i < instruction->operands; i++) {
        /*
         * We ignore extrinsic operand sizes on registers, so we should
         * never try to fuzzy-match on them.  This also resolves the case
         * when we have e.g. "xmmrm128" in two different positions.
         */
        if (is_class(REGISTER, instruction->oprs[i].type))
            continue;

        /* This tests if xsizeflags[i] has more than one bit set */
        if ((xsizeflags[i] & (xsizeflags[i]-1)))
            goto done;                /* No luck */

        if (i == broadcast) {
            instruction->oprs[i].decoflags |= xsizeflags[i];
            instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
                                          BITS32 : BITS64);
        } else {
            instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
        }
    }

    /* Try matching again... */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        }
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

done:
    *tempp = temp;
    return merr;
}
2420 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2422 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
2423 uint8_t brcast_num;
2425 if (brsize > BITS64)
2426 nasm_fatal("size of broadcasting element is greater than 64 bits");
2429 * The shift term is to take care of the extra BITS80 inserted
2430 * between BITS64 and BITS128.
2432 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
2433 >> (opsize > (BITS64 >> SIZE_SHIFT));
2435 return brcast_num;
/*
 * Test whether the instruction template *itemp can encode *instruction
 * in the given mode (bits = 16, 32 or 64).
 *
 * Returns MOK_GOOD on a full match, MOK_JUMP if the template is one of
 * the special jump byte-codes (0370/0371) which need relaxation handling
 * by the caller, or a MERR_* code identifying the first mismatch found.
 * MERR_OPSIZEMISSING in particular tells the caller to retry with its
 * fuzzy operand-size logic.
 */
static enum match_result matches(const struct itemplate *itemp,
                                 insn *instruction, int bits)
{
    opflags_t size[MAX_OPERANDS], asize;
    bool opsizemissing = false;
    int i, oprs;

    /*
     * Check the opcode
     */
    if (itemp->opcode != instruction->opcode)
        return MERR_INVALOP;

    /*
     * Count the operands
     */
    if (itemp->operands != instruction->operands)
        return MERR_INVALOP;

    /*
     * Is it legal?
     * (IF_OPT templates are only usable when optimization is enabled.)
     */
    if (!(optimizing.level > 0) && itemp_has(itemp, IF_OPT))
        return MERR_INVALOP;

    /*
     * {evex} available?
     * An explicit {evex}/{vex3}/{vex2} prefix forces a matching encoding.
     */
    switch (instruction->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!itemp_has(itemp, IF_EVEX))
            return MERR_ENCMISMATCH;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!itemp_has(itemp, IF_VEX))
            return MERR_ENCMISMATCH;
        break;
    default:
        break;
    }

    /*
     * Check that no spurious colons or TOs are present
     */
    for (i = 0; i < itemp->operands; i++)
        if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
            return MERR_INVALOP;

    /*
     * Process size flags: derive the operand size implied by the
     * template's IF_S* flag, if any.
     */
    switch (itemp_smask(itemp)) {
    case IF_GENBIT(IF_SB):
        asize = BITS8;
        break;
    case IF_GENBIT(IF_SW):
        asize = BITS16;
        break;
    case IF_GENBIT(IF_SD):
        asize = BITS32;
        break;
    case IF_GENBIT(IF_SQ):
        asize = BITS64;
        break;
    case IF_GENBIT(IF_SO):
        asize = BITS128;
        break;
    case IF_GENBIT(IF_SY):
        asize = BITS256;
        break;
    case IF_GENBIT(IF_SZ):
        asize = BITS512;
        break;
    case IF_GENBIT(IF_ANYSIZE):
        asize = SIZE_MASK;      /* any size is acceptable */
        break;
    case IF_GENBIT(IF_SIZE):
        /* IF_SIZE: the operand size follows the current mode */
        switch (bits) {
        case 16:
            asize = BITS16;
            break;
        case 32:
            asize = BITS32;
            break;
        case 64:
            asize = BITS64;
            break;
        default:
            asize = 0;
            break;
        }
        break;
    default:
        asize = 0;
        break;
    }

    if (itemp_armask(itemp)) {
        /* S- flags only apply to a specific operand */
        i = itemp_arg(itemp);
        memset(size, 0, sizeof size);
        size[i] = asize;
    } else {
        /* S- flags apply to all operands */
        for (i = 0; i < MAX_OPERANDS; i++)
            size[i] = asize;
    }

    /*
     * Check that the operand flags all match up,
     * it's a bit tricky so lets be verbose:
     *
     * 1) Find out the size of operand. If instruction
     *    doesn't have one specified -- we're trying to
     *    guess it either from template (IF_S* flag) or
     *    from code bits.
     *
     * 2) If template operand do not match the instruction OR
     *    template has an operand size specified AND this size differ
     *    from which instruction has (perhaps we got it from code bits)
     *    we are:
     *      a)  Check that only size of instruction and operand is differ
     *          other characteristics do match
     *      b)  Perhaps it's a register specified in instruction so
     *          for such a case we just mark that operand as "size
     *          missing" and this will turn on fuzzy operand size
     *          logic facility (handled by a caller)
     */
    for (i = 0; i < itemp->operands; i++) {
        opflags_t type = instruction->oprs[i].type;
        decoflags_t deco = instruction->oprs[i].decoflags;
        decoflags_t ideco = itemp->deco[i];
        bool is_broadcast = deco & BRDCAST_MASK;
        uint8_t brcast_num = 0;
        opflags_t template_opsize, insn_opsize;

        if (!(type & SIZE_MASK))
            type |= size[i];    /* no explicit size: use the derived one */

        insn_opsize = type & SIZE_MASK;
        if (!is_broadcast) {
            template_opsize = itemp->opd[i] & SIZE_MASK;
        } else {
            decoflags_t deco_brsize = ideco & BRSIZE_MASK;

            /* operand used a {1toN} decorator the template doesn't allow */
            if (~ideco & BRDCAST_MASK)
                return MERR_BRNOTHERE;

            /*
             * when broadcasting, the element size depends on
             * the instruction type. decorator flag should match.
             */
            if (deco_brsize) {
                template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
                /* calculate the proper number : {1to<brcast_num>} */
                brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
            } else {
                template_opsize = 0;
            }
        }

        /* opmask used where the template permits none */
        if (~ideco & deco & OPMASK_MASK)
            return MERR_MASKNOTHERE;

        /* {z}, {rn-sae} etc. used where the template permits none */
        if (~ideco & deco & (Z_MASK|STATICRND_MASK|SAE_MASK))
            return MERR_DECONOTHERE;

        /* template requires operand bits the instruction lacks */
        if (itemp->opd[i] & ~type & ~(SIZE_MASK|REGSET_MASK))
            return MERR_INVALOP;

        if (~itemp->opd[i] & type & REGSET_MASK)
            return (itemp->opd[i] & REGSET_MASK)
                ? MERR_REGSETSIZE : MERR_REGSET;

        if (template_opsize) {
            if (template_opsize != insn_opsize) {
                if (insn_opsize) {
                    return MERR_INVALOP;
                } else if (!is_class(REGISTER, type)) {
                    /*
                     * Note: we don't honor extrinsic operand sizes for registers,
                     * so "missing operand size" for a register should be
                     * considered a wildcard match rather than an error.
                     */
                    opsizemissing = true;
                }
            } else if (is_broadcast &&
                       (brcast_num !=
                        (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
                /*
                 * broadcasting opsize matches but the number of repeated memory
                 * element does not match.
                 * if 64b double precision float is broadcasted to ymm (256b),
                 * broadcasting decorator must be {1to4}.
                 */
                return MERR_BRNUMMISMATCH;
            }
        }
    }

    if (opsizemissing)
        return MERR_OPSIZEMISSING;

    /*
     * Check operand sizes: for IF_SM/IF_SM2 templates, propagate the
     * first explicitly-sized template operand's size to the others
     * (first 2 operands only for IF_SM2).
     */
    if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
        oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
        for (i = 0; i < oprs; i++) {
            asize = itemp->opd[i] & SIZE_MASK;
            if (asize) {
                /* NB: deliberately reuses i; the break exits the outer loop */
                for (i = 0; i < oprs; i++)
                    size[i] = asize;
                break;
            }
        }
    } else {
        oprs = itemp->operands;
    }

    for (i = 0; i < itemp->operands; i++) {
        if (!(itemp->opd[i] & SIZE_MASK) &&
            (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
            return MERR_OPSIZEMISMATCH;
    }

    /*
     * Check template is okay at the set cpu level
     */
    if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
        return MERR_BADCPU;

    /*
     * Verify the appropriate long mode flag.
     */
    if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
        return MERR_BADMODE;

    /*
     * If we have a HLE prefix, look for the NOHLE flag
     */
    if (itemp_has(itemp, IF_NOHLE) &&
        (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
         has_prefix(instruction, PPS_REP, P_XRELEASE)))
        return MERR_BADHLE;

    /*
     * Check if special handling needed for Jumps
     * (byte codes 0370/0371 — relaxable jump instructions)
     */
    if ((itemp->code[0] & ~1) == 0370)
        return MOK_JUMP;

    /*
     * Check if BND prefix is allowed.
     * Other 0xF2 (REPNE/REPNZ) prefix is prohibited.
     */
    if (!itemp_has(itemp, IF_BND) &&
        (has_prefix(instruction, PPS_REP, P_BND) ||
         has_prefix(instruction, PPS_REP, P_NOBND)))
        return MERR_BADBND;
    else if (itemp_has(itemp, IF_BND) &&
             (has_prefix(instruction, PPS_REP, P_REPNE) ||
              has_prefix(instruction, PPS_REP, P_REPNZ)))
        return MERR_BADREPNE;

    return MOK_GOOD;
}
/*
 * Check if ModR/M.mod should/can be 01.
 * - EAF_BYTEOFFS is set
 * - offset can fit in a byte when EVEX is not used
 * - offset can be compressed when EVEX is used
 *
 * NOTE: this macro is deliberately unhygienic — it expands against the
 * caller's locals `input`, `ins`, `seg`, `forw_ref`, `o` and `output`,
 * and (in the EVEX case) writes output->disp8 as a side effect via
 * is_disp8n().  Only usable inside process_ea()'s indirection branches.
 */
#define IS_MOD_01() (!(input->eaflags & EAF_WORDOFFS) &&               \
                     (ins->rex & REX_EV ? seg == NO_SEG && !forw_ref && \
                      is_disp8n(input, ins, &output->disp8) :           \
                      input->eaflags & EAF_BYTEOFFS || (o >= -128 &&    \
                      o <= 127 && seg == NO_SEG && !forw_ref)))
/*
 * Compute the ModR/M + SIB + displacement encoding for the effective
 * address operand *input, writing the result into *output (modrm, sib,
 * sib_present, bytes of displacement, rip flag, total size) and OR-ing
 * any required REX/EVEX bits into output->rex and ins->evex_p[].
 *
 * rfield/rflags describe the register going into ModR/M.reg.
 * Returns the EA type (EA_SCALAR or one of the EA_*VSIB types), or
 * EA_INVALID on failure, in which case *errmsg points to a static
 * error string.
 */
static enum ea_type process_ea(operand *input, ea *output, int bits,
                               int rfield, opflags_t rflags, insn *ins,
                               const char **errmsg)
{
    bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
    int addrbits = ins->addr_size;
    int eaflags = input->eaflags;

    *errmsg = "invalid effective address"; /* Default error message */

    output->type = EA_SCALAR;
    output->rip = false;
    output->disp8 = 0;

    /* REX flags for the rfield operand */
    output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
    /* EVEX.R' flag for the REG operand */
    ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);

    if (is_class(REGISTER, input->type)) {
        /*
         * It's a direct register.
         */
        if (!is_register(input->basereg))
            goto err;

        if (!is_reg_class(REG_EA, input->basereg))
            goto err;

        /* broadcasting is not available with a direct register operand. */
        if (input->decoflags & BRDCAST_MASK) {
            *errmsg = "broadcast not allowed with register operand";
            goto err;
        }

        output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
        ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
        output->sib_present = false;    /* no SIB necessary */
        output->bytes = 0;              /* no offset necessary either */
        output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
    } else {
        /*
         * It's a memory reference.
         */

        /* Embedded rounding or SAE is not available with a mem ref operand. */
        if (input->decoflags & (ER | SAE)) {
            *errmsg = "embedded rounding is available only with "
                "register-register operations";
            goto err;
        }

        if (input->basereg == -1 &&
            (input->indexreg == -1 || input->scale == 0)) {
            /*
             * It's a pure offset.
             */
            if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
                if (input->segment == NO_SEG ||
                    (input->opflags & OPFLAG_RELATIVE)) {
                    nasm_warn(WARN_OTHER|ERR_PASS2, "absolute address can not be RIP-relative");
                    input->type &= ~IP_REL;
                    input->type |= MEMORY;
                }
            }

            if (bits == 64 &&
                !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
                *errmsg = "RIP-relative addressing is prohibited for MIB";
                goto err;
            }

            if (eaflags & EAF_BYTEOFFS ||
                (eaflags & EAF_WORDOFFS &&
                 input->disp_size != (addrbits != 16 ? 32 : 16)))
                nasm_warn(WARN_OTHER, "displacement size ignored on absolute address");

            if (bits == 64 && (~input->type & IP_REL)) {
                /* 64-bit absolute: needs the [base=5, index=4] SIB form */
                output->sib_present = true;
                output->sib = GEN_SIB(0, 4, 5);
                output->bytes = 4;
                output->modrm = GEN_MODRM(0, rfield, 4);
                output->rip = false;
            } else {
                output->sib_present = false;
                output->bytes = (addrbits != 16 ? 4 : 2);
                output->modrm = GEN_MODRM(0, rfield,
                                          (addrbits != 16 ? 5 : 6));
                output->rip = bits == 64;
            }
        } else {
            /*
             * It's an indirection.
             */
            int i = input->indexreg, b = input->basereg, s = input->scale;
            int32_t seg = input->segment;
            int hb = input->hintbase, ht = input->hinttype;
            int t, it, bt;              /* register numbers */
            opflags_t x, ix, bx;        /* register flags */

            if (s == 0)
                i = -1;         /* make this easy, at least */

            if (is_register(i)) {
                it = nasm_regvals[i];
                ix = nasm_reg_flags[i];
            } else {
                it = -1;
                ix = 0;
            }

            if (is_register(b)) {
                bt = nasm_regvals[b];
                bx = nasm_reg_flags[b];
            } else {
                bt = -1;
                bx = 0;
            }

            /* if either one are a vector register... */
            if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
                opflags_t sok = BITS32 | BITS64;
                int32_t o = input->offset;
                int mod, scale, index, base;

                /*
                 * For a vector SIB, one has to be a vector and the other,
                 * if present, a GPR. The vector must be the index operand.
                 */
                if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
                    if (s == 0)
                        s = 1;
                    else if (s != 1)
                        goto err;

                    t = bt, bt = it, it = t;
                    x = bx, bx = ix, ix = x;
                }

                if (bt != -1) {
                    if (REG_GPR & ~bx)
                        goto err;
                    if (!(REG64 & ~bx) || !(REG32 & ~bx))
                        sok &= bx;
                    else
                        goto err;
                }

                /*
                 * While we're here, ensure the user didn't specify
                 * WORD or QWORD
                 */
                if (input->disp_size == 16 || input->disp_size == 64)
                    goto err;

                if (addrbits == 16 ||
                    (addrbits == 32 && !(sok & BITS32)) ||
                    (addrbits == 64 && !(sok & BITS64)))
                    goto err;

                output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
                                : ((ix & YMMREG & ~REG_EA)
                                   ? EA_YMMVSIB : EA_XMMVSIB));

                output->rex |= rexflags(it, ix, REX_X);
                output->rex |= rexflags(bt, bx, REX_B);
                ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);

                index = it & 7; /* it is known to be != -1 */

                switch (s) {
                case 1:
                    scale = 0;
                    break;
                case 2:
                    scale = 1;
                    break;
                case 4:
                    scale = 2;
                    break;
                case 8:
                    scale = 3;
                    break;
                default:        /* then what the smeg is it? */
                    goto err;   /* panic */
                }

                if (bt == -1) {
                    base = 5;   /* no base: disp32-only SIB form */
                    mod = 0;
                } else {
                    base = (bt & 7);
                    /* [EBP]/[R13] with mod=0 means disp32, so EBP-class
                       bases always need an explicit displacement */
                    if (base != REG_NUM_EBP && o == 0 &&
                        seg == NO_SEG && !forw_ref &&
                        !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                        mod = 0;
                    else if (IS_MOD_01())
                        mod = 1;
                    else
                        mod = 2;
                }

                output->sib_present = true;
                output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                output->modrm = GEN_MODRM(mod, rfield, 4);
                output->sib = GEN_SIB(scale, index, base);
            } else if ((ix|bx) & (BITS32|BITS64)) {
                /*
                 * it must be a 32/64-bit memory reference. Firstly we have
                 * to check that all registers involved are type E/Rxx.
                 */
                opflags_t sok = BITS32 | BITS64;
                int32_t o = input->offset;

                if (it != -1) {
                    if (!(REG64 & ~ix) || !(REG32 & ~ix))
                        sok &= ix;
                    else
                        goto err;
                }

                if (bt != -1) {
                    if (REG_GPR & ~bx)
                        goto err;       /* Invalid register */
                    if (~sok & bx & SIZE_MASK)
                        goto err;       /* Invalid size */
                    sok &= bx;
                }

                /*
                 * While we're here, ensure the user didn't specify
                 * WORD or QWORD
                 */
                if (input->disp_size == 16 || input->disp_size == 64)
                    goto err;

                if (addrbits == 16 ||
                    (addrbits == 32 && !(sok & BITS32)) ||
                    (addrbits == 64 && !(sok & BITS64)))
                    goto err;

                /* now reorganize base/index */
                if (s == 1 && bt != it && bt != -1 && it != -1 &&
                    ((hb == b && ht == EAH_NOTBASE) ||
                     (hb == i && ht == EAH_MAKEBASE))) {
                    /* swap if hints say so */
                    t = bt, bt = it, it = t;
                    x = bx, bx = ix, ix = x;
                }

                if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
                    /* make single reg base, unless hint */
                    bt = it, bx = ix, it = -1, ix = 0;
                }
                if (eaflags & EAF_MIB) {
                    /* only for mib operands */
                    if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
                        /*
                         * make a single reg index [reg*1].
                         * gas uses this form for an explicit index register.
                         */
                        it = bt, ix = bx, bt = -1, bx = 0, s = 1;
                    }
                    if ((ht == EAH_SUMMED) && bt == -1) {
                        /* separate once summed index into [base, index] */
                        bt = it, bx = ix, s--;
                    }
                } else {
                    if (((s == 2 && it != REG_NUM_ESP &&
                          (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
                         s == 3 || s == 5 || s == 9) && bt == -1) {
                        /* convert 3*EAX to EAX+2*EAX */
                        bt = it, bx = ix, s--;
                    }
                    if (it == -1 && (bt & 7) != REG_NUM_ESP &&
                        (eaflags & EAF_TIMESTWO) &&
                        (hb == b && ht == EAH_NOTBASE)) {
                        /*
                         * convert [NOSPLIT EAX*1]
                         * to sib format with 0x0 displacement - [EAX*1+0].
                         */
                        it = bt, ix = bx, bt = -1, bx = 0, s = 1;
                    }
                }
                if (s == 1 && it == REG_NUM_ESP) {
                    /* swap ESP into base if scale is 1 */
                    t = it, it = bt, bt = t;
                    x = ix, ix = bx, bx = x;
                }
                /* ESP can never be an index register */
                if (it == REG_NUM_ESP ||
                    (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
                    goto err;   /* wrong, for various reasons */

                output->rex |= rexflags(it, ix, REX_X);
                output->rex |= rexflags(bt, bx, REX_B);

                if (it == -1 && (bt & 7) != REG_NUM_ESP) {
                    /* no SIB needed */
                    int mod, rm;

                    if (bt == -1) {
                        rm = 5;
                        mod = 0;
                    } else {
                        rm = (bt & 7);
                        /* [EBP]/[R13] with mod=0 means disp32 instead */
                        if (rm != REG_NUM_EBP && o == 0 &&
                            seg == NO_SEG && !forw_ref &&
                            !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                            mod = 0;
                        else if (IS_MOD_01())
                            mod = 1;
                        else
                            mod = 2;
                    }

                    output->sib_present = false;
                    output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                    output->modrm = GEN_MODRM(mod, rfield, rm);
                } else {
                    /* we need a SIB */
                    int mod, scale, index, base;

                    if (it == -1)
                        index = 4, s = 1;       /* index=4 means "none" */
                    else
                        index = (it & 7);

                    switch (s) {
                    case 1:
                        scale = 0;
                        break;
                    case 2:
                        scale = 1;
                        break;
                    case 4:
                        scale = 2;
                        break;
                    case 8:
                        scale = 3;
                        break;
                    default:    /* then what the smeg is it? */
                        goto err;       /* panic */
                    }

                    if (bt == -1) {
                        base = 5;       /* no base: disp32-only SIB form */
                        mod = 0;
                    } else {
                        base = (bt & 7);
                        if (base != REG_NUM_EBP && o == 0 &&
                            seg == NO_SEG && !forw_ref &&
                            !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                            mod = 0;
                        else if (IS_MOD_01())
                            mod = 1;
                        else
                            mod = 2;
                    }

                    output->sib_present = true;
                    output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                    output->modrm = GEN_MODRM(mod, rfield, 4);
                    output->sib = GEN_SIB(scale, index, base);
                }
            } else {            /* it's 16-bit */
                int mod, rm;
                int16_t o = input->offset;

                /* check for 64-bit long mode */
                if (addrbits == 64)
                    goto err;

                /* check all registers are BX, BP, SI or DI */
                if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
                    (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
                    goto err;

                /* ensure the user didn't specify DWORD/QWORD */
                if (input->disp_size == 32 || input->disp_size == 64)
                    goto err;

                if (s != 1 && i != -1)
                    goto err;   /* no can do, in 16-bit EA */
                if (b == -1 && i != -1) {
                    int tmp = b;
                    b = i;
                    i = tmp;
                }               /* swap */
                if ((b == R_SI || b == R_DI) && i != -1) {
                    int tmp = b;
                    b = i;
                    i = tmp;
                }
                /* have BX/BP as base, SI/DI index */
                if (b == i)
                    goto err;   /* shouldn't ever happen, in theory */
                if (i != -1 && b != -1 &&
                    (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
                    goto err;   /* invalid combinations */
                if (b == -1)    /* pure offset: handled above */
                    goto err;   /* so if it gets to here, panic! */

                /* map the base/index pair onto the 16-bit ModR/M rm table */
                rm = -1;
                if (i != -1)
                    switch (i * 256 + b) {
                    case R_SI * 256 + R_BX:
                        rm = 0;
                        break;
                    case R_DI * 256 + R_BX:
                        rm = 1;
                        break;
                    case R_SI * 256 + R_BP:
                        rm = 2;
                        break;
                    case R_DI * 256 + R_BP:
                        rm = 3;
                        break;
                    } else
                    switch (b) {
                    case R_SI:
                        rm = 4;
                        break;
                    case R_DI:
                        rm = 5;
                        break;
                    case R_BP:
                        rm = 6;
                        break;
                    case R_BX:
                        rm = 7;
                        break;
                    }
                if (rm == -1)   /* can't happen, in theory */
                    goto err;   /* so panic if it does */

                /* rm == 6 with mod == 0 means disp16, so [BP] needs mod 1 */
                if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
                    !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                    mod = 0;
                else if (IS_MOD_01())
                    mod = 1;
                else
                    mod = 2;

                output->sib_present = false;    /* no SIB - it's 16-bit */
                output->bytes = mod;            /* bytes of offset needed */
                output->modrm = GEN_MODRM(mod, rfield, rm);
            }
        }
    }

    /* total EA length: ModR/M byte + optional SIB + displacement bytes */
    output->size = 1 + output->sib_present + output->bytes;
    return output->type;

err:
    return output->type = EA_INVALID;
}
3176 static void add_asp(insn *ins, int addrbits)
3178 int j, valid;
3179 int defdisp;
3181 valid = (addrbits == 64) ? 64|32 : 32|16;
3183 switch (ins->prefixes[PPS_ASIZE]) {
3184 case P_A16:
3185 valid &= 16;
3186 break;
3187 case P_A32:
3188 valid &= 32;
3189 break;
3190 case P_A64:
3191 valid &= 64;
3192 break;
3193 case P_ASP:
3194 valid &= (addrbits == 32) ? 16 : 32;
3195 break;
3196 default:
3197 break;
3200 for (j = 0; j < ins->operands; j++) {
3201 if (is_class(MEMORY, ins->oprs[j].type)) {
3202 opflags_t i, b;
3204 /* Verify as Register */
3205 if (!is_register(ins->oprs[j].indexreg))
3206 i = 0;
3207 else
3208 i = nasm_reg_flags[ins->oprs[j].indexreg];
3210 /* Verify as Register */
3211 if (!is_register(ins->oprs[j].basereg))
3212 b = 0;
3213 else
3214 b = nasm_reg_flags[ins->oprs[j].basereg];
3216 if (ins->oprs[j].scale == 0)
3217 i = 0;
3219 if (!i && !b) {
3220 int ds = ins->oprs[j].disp_size;
3221 if ((addrbits != 64 && ds > 8) ||
3222 (addrbits == 64 && ds == 16))
3223 valid &= ds;
3224 } else {
3225 if (!(REG16 & ~b))
3226 valid &= 16;
3227 if (!(REG32 & ~b))
3228 valid &= 32;
3229 if (!(REG64 & ~b))
3230 valid &= 64;
3232 if (!(REG16 & ~i))
3233 valid &= 16;
3234 if (!(REG32 & ~i))
3235 valid &= 32;
3236 if (!(REG64 & ~i))
3237 valid &= 64;
3242 if (valid & addrbits) {
3243 ins->addr_size = addrbits;
3244 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
3245 /* Add an address size prefix */
3246 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;;
3247 ins->addr_size = (addrbits == 32) ? 16 : 32;
3248 } else {
3249 /* Impossible... */
3250 nasm_nonfatal("impossible combination of address sizes");
3251 ins->addr_size = addrbits; /* Error recovery */
3254 defdisp = ins->addr_size == 16 ? 16 : 32;
3256 for (j = 0; j < ins->operands; j++) {
3257 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
3258 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
3260 * mem_offs sizes must match the address size; if not,
3261 * strip the MEM_OFFS bit and match only EA instructions
3263 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);