/* ----------------------------------------------------------------------- *
 *
 *   Copyright 1996-2018 The NASM Authors - All Rights Reserved
 *   See the file AUTHORS included with the NASM distribution for
 *   the specific copyright holders.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following
 *   conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------- */

/*
 * assemble.c   code generation for the Netwide Assembler
 *
 * Bytecode specification
 * ----------------------
 *
 *
 * Codes            Mnemonic        Explanation
 *
 * \0                               terminates the code. (Unless it's a literal of course.)
 * \1..\4                           that many literal bytes follow in the code stream
 * \5                               add 4 to the primary operand number (b, low octdigit)
 * \6                               add 4 to the secondary operand number (a, middle octdigit)
 * \7                               add 4 to both the primary and the secondary operand number
 * \10..\13                         a literal byte follows in the code stream, to be added
 *                                  to the register value of operand 0..3
 * \14..\17                         the position of index register operand in MIB (BND insns)
 * \20..\23         ib              a byte immediate operand, from operand 0..3
 * \24..\27         ib,u            a zero-extended byte immediate operand, from operand 0..3
 * \30..\33         iw              a word immediate operand, from operand 0..3
 * \34..\37         iwd             select between \3[0-3] and \4[0-3] depending on 16/32 bit
 *                                  assembly mode or the operand-size override on the operand
 * \40..\43         id              a long immediate operand, from operand 0..3
 * \44..\47         iwdq            select between \3[0-3], \4[0-3] and \5[4-7]
 *                                  depending on the address size of the instruction.
 * \50..\53         rel8            a byte relative operand, from operand 0..3
 * \54..\57         iq              a qword immediate operand, from operand 0..3
 * \60..\63         rel16           a word relative operand, from operand 0..3
 * \64..\67         rel             select between \6[0-3] and \7[0-3] depending on 16/32 bit
 *                                  assembly mode or the operand-size override on the operand
 * \70..\73         rel32           a long relative operand, from operand 0..3
 * \74..\77         seg             a word constant, from the _segment_ part of operand 0..3
 * \1ab                             a ModRM, calculated on EA in operand a, with the spare
 *                                  field the register value of operand b.
 * \172\ab                          the register number from operand a in bits 7..4, with
 *                                  the 4-bit immediate from operand b in bits 3..0.
 * \173\xab                         the register number from operand a in bits 7..4, with
 *                                  the value b in bits 3..0.
 * \174..\177                       the register number from operand 0..3 in bits 7..4, and
 *                                  an arbitrary value in bits 3..0 (assembled as zero.)
 * \2ab                             a ModRM, calculated on EA in operand a, with the spare
 *                                  field equal to digit b.
 *
 * \240..\243                       this instruction uses EVEX rather than REX or VEX/XOP, with the
 *                                  V field taken from operand 0..3.
 * \250                             this instruction uses EVEX rather than REX or VEX/XOP, with the
 *                                  V field set to 1111b.
 *
 * EVEX prefixes are followed by the sequence:
 * \cm\wlp\tup    where cm is:
 *                  cc 00m mmm
 *                  c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
 *                and wlp is:
 *                  00 wwl lpp
 *                  [l0]  ll = 0 (.128, .lz)
 *                  [l1]  ll = 1 (.256)
 *                  [l2]  ll = 2 (.512)
 *                  [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
 *
 *                  [w0]  ww = 0 for W = 0
 *                  [w1]  ww = 1 for W = 1
 *                  [wig] ww = 2 for W don't care (always assembled as 0)
 *                  [ww]  ww = 3 for W used as REX.W
 *
 *                  [p0]  pp = 0 for no prefix
 *                  [66]  pp = 1 for legacy prefix 66
 *                  [f3]  pp = 2
 *                  [f2]  pp = 3
 *
 *                tup is tuple type for Disp8*N from %tuple_codes in insns.pl
 *                (compressed displacement encoding)
 *
 * \254..\257       id,s            a signed 32-bit operand to be extended to 64 bits.
 * \260..\263                       this instruction uses VEX/XOP rather than REX, with the
 *                                  V field taken from operand 0..3.
 * \270                             this instruction uses VEX/XOP rather than REX, with the
 *                                  V field set to 1111b.
 *
 * VEX/XOP prefixes are followed by the sequence:
 * \tmm\wlp        where mm is the M field; and wlp is:
 *                 00 wwl lpp
 *                 [l0]  ll = 0 for L = 0 (.128, .lz)
 *                 [l1]  ll = 1 for L = 1 (.256)
 *                 [lig] ll = 2 for L don't care (always assembled as 0)
 *
 *                 [w0]  ww = 0 for W = 0
 *                 [w1]  ww = 1 for W = 1
 *                 [wig] ww = 2 for W don't care (always assembled as 0)
 *                 [ww]  ww = 3 for W used as REX.W
 *
 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
 *
 * \271             hlexr           instruction takes XRELEASE (F3) with or without lock
 * \272             hlenl           instruction takes XACQUIRE/XRELEASE with or without lock
 * \273             hle             instruction takes XACQUIRE/XRELEASE with lock only
 * \274..\277       ib,s            a byte immediate operand, from operand 0..3, sign-extended
 *                                  to the operand size (if o16/o32/o64 present) or the bit size
 * \310             a16             indicates fixed 16-bit address size, i.e. optional 0x67.
 * \311             a32             indicates fixed 32-bit address size, i.e. optional 0x67.
 * \312             adf             (disassembler only) invalid with non-default address size.
 * \313             a64             indicates fixed 64-bit address size, 0x67 invalid.
 * \314             norexb          (disassembler only) invalid with REX.B
 * \315             norexx          (disassembler only) invalid with REX.X
 * \316             norexr          (disassembler only) invalid with REX.R
 * \317             norexw          (disassembler only) invalid with REX.W
 * \320             o16             indicates fixed 16-bit operand size, i.e. optional 0x66.
 * \321             o32             indicates fixed 32-bit operand size, i.e. optional 0x66.
 * \322             odf             indicates that this instruction is only valid when the
 *                                  operand size is the default (instruction to disassembler,
 *                                  generates no code in the assembler)
 * \323             o64nw           indicates fixed 64-bit operand size, REX on extensions only.
 * \324             o64             indicates 64-bit operand size requiring REX prefix.
 * \325             nohi            instruction which always uses spl/bpl/sil/dil
 * \326             nof3            instruction not valid with 0xF3 REP prefix. Hint for
 *                                  disassembler only; for SSE instructions.
 * \330                             a literal byte follows in the code stream, to be added
 *                                  to the condition code value of the instruction.
 * \331             norep           instruction not valid with REP prefix. Hint for
 *                                  disassembler only; for SSE instructions.
 * \332             f2i             REP prefix (0xF2 byte) used as opcode extension.
 * \333             f3i             REP prefix (0xF3 byte) used as opcode extension.
 * \334             rex.l           LOCK prefix used as REX.R (used in non-64-bit mode)
 * \335             repe            disassemble a rep (0xF3 byte) prefix as repe not rep.
 * \336             mustrep         force a REP(E) prefix (0xF3) even if not specified.
 * \337             mustrepne       force a REPNE prefix (0xF2) even if not specified.
 *                                  \336-\337 are still listed as prefixes in the disassembler.
 * \340             resb            reserve <operand 0> bytes of uninitialized storage.
 *                                  Operand 0 had better be a segmentless constant.
 * \341             wait            this instruction needs a WAIT "prefix"
 * \360             np              no SSE prefix (== \364\331)
 * \361             66              SSE prefix (== \366\331)
 * \364             !osp            operand-size prefix (0x66) not permitted
 * \365             !asp            address-size prefix (0x67) not permitted
 * \366                             operand-size prefix (0x66) used as opcode extension
 * \367                             address-size prefix (0x67) used as opcode extension
 * \370,\371        jcc8            match only if operand 0 meets byte jump criteria.
 *                  jmp8            370 is used for Jcc, 371 is used for JMP.
 * \373             jlen            assemble 0x03 if bits==16, 0x05 if bits==32;
 *                                  used for conditional jump over longer jump
 * \374             vsibx|vm32x|vm64x  this instruction takes an XMM VSIB memory EA
 * \375             vsiby|vm32y|vm64y  this instruction takes a YMM VSIB memory EA
 * \376             vsibz|vm32z|vm64z  this instruction takes a ZMM VSIB memory EA
 */
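
/*
 * Illustrative example (hypothetical, not an actual template from
 * insns.dat): a code string such as
 *
 *      \320 \10 \x40 \0
 *
 * would be read, per the table above, as: \320 (o16) requests a fixed
 * 16-bit operand size, so 0x66 is emitted when needed; \10 says the next
 * literal byte (here 0x40) is emitted with the register value of operand 0
 * added to it; \0 terminates the code.  The byte 0x40 and the combination
 * of codes were chosen only to show how the table is read.
 */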

#include "compiler.h"

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#include "nasm.h"
#include "nasmlib.h"
#include "error.h"
#include "assemble.h"
#include "insns.h"
#include "tables.h"
#include "disp8.h"
#include "listing.h"

enum match_result {
    /*
     * Matching errors.  These should be sorted so that more specific
     * errors come later in the sequence.
     */
    MERR_INVALOP,
    MERR_OPSIZEMISSING,
    MERR_OPSIZEMISMATCH,
    MERR_BRNOTHERE,
    MERR_BRNUMMISMATCH,
    MERR_MASKNOTHERE,
    MERR_DECONOTHERE,
    MERR_BADCPU,
    MERR_BADMODE,
    MERR_BADHLE,
    MERR_ENCMISMATCH,
    MERR_BADBND,
    MERR_BADREPNE,
    MERR_REGSETSIZE,
    MERR_REGSET,
    /*
     * Matching success; the conditional ones first
     */
    MOK_JUMP,                   /* Matching OK but needs jmp_match() */
    MOK_GOOD                    /* Matching unconditionally OK */
};

typedef struct {
    enum ea_type type;            /* what kind of EA is this? */
    int sib_present;              /* is a SIB byte necessary? */
    int bytes;                    /* # of bytes of offset needed */
    int size;                     /* lazy - this is sib+bytes+1 */
    uint8_t modrm, sib, rex, rip; /* the bytes themselves */
    int8_t disp8;                 /* compressed displacement for EVEX */
} ea;

#define GEN_SIB(scale, index, base)                 \
        (((scale) << 6) | ((index) << 3) | ((base)))

#define GEN_MODRM(mod, reg, rm)                     \
        (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))

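/*
 * Worked example of the two macros above (values chosen for illustration
 * only): GEN_MODRM(3, 2, 1) == 0xD1, i.e. mod=11b (register direct),
 * spare/reg=010b, rm=001b; GEN_SIB(2, 1, 5) == 0x8D, i.e. scale=10b
 * (factor 4), index=001b, base=101b.
 */
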
static int64_t calcsize(int32_t, int64_t, int, insn *,
                        const struct itemplate *);
static int emit_prefix(struct out_data *data, const int bits, insn *ins);
static void gencode(struct out_data *data, insn *ins);
static enum match_result find_match(const struct itemplate **tempp,
                                    insn *instruction,
                                    int32_t segment, int64_t offset, int bits);
static enum match_result matches(const struct itemplate *, insn *, int bits);
static opflags_t regflag(const operand *);
static int32_t regval(const operand *);
static int rexflags(int, opflags_t, int);
static int op_rexflags(const operand *, int);
static int op_evexflags(const operand *, int, uint8_t);
static void add_asp(insn *, int);

static enum ea_type process_ea(operand *, ea *, int, int,
                               opflags_t, insn *, const char **);

static inline bool absolute_op(const struct operand *o)
{
    return o->segment == NO_SEG && o->wrt == NO_SEG &&
        !(o->opflags & OPFLAG_RELATIVE);
}

static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
{
    return ins->prefixes[pos] == prefix;
}

static void assert_no_prefix(insn * ins, enum prefix_pos pos)
{
    if (ins->prefixes[pos])
        nasm_nonfatal("invalid %s prefix", prefix_name(ins->prefixes[pos]));
}

static const char *size_name(int size)
{
    switch (size) {
    case 1:
        return "byte";
    case 2:
        return "word";
    case 4:
        return "dword";
    case 8:
        return "qword";
    case 10:
        return "tword";
    case 16:
        return "oword";
    case 32:
        return "yword";
    case 64:
        return "zword";
    default:
        return "???";
    }
}

static void warn_overflow(int size)
{
    nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW, "%s data exceeds bounds",
              size_name(size));
}

static void warn_overflow_const(int64_t data, int size)
{
    if (overflow_general(data, size))
        warn_overflow(size);
}

static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
{
    bool err;

    switch (sign) {
    case OUT_WRAP:
        err = overflow_general(data, size);
        break;
    case OUT_SIGNED:
        err = overflow_signed(data, size);
        break;
    case OUT_UNSIGNED:
        err = overflow_unsigned(data, size);
        break;
    default:
        panic();
        break;
    }

    if (err)
        warn_overflow(size);
}

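/*
 * Illustration of the three out_sign modes checked above (hypothetical
 * values): with size == 1, the value 255 fits as OUT_UNSIGNED or OUT_WRAP
 * but overflows as OUT_SIGNED, whereas -1 fits as OUT_SIGNED or OUT_WRAP
 * but overflows as OUT_UNSIGNED.
 */
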
/*
 * This routine wrappers the real output format's output routine,
 * in order to pass a copy of the data off to the listing file
 * generator at the same time, flatten unnecessary relocations,
 * and verify backend compatibility.
 */
static void out(struct out_data *data)
{
    static int32_t lineno = 0;      /* static!!! */
    static const char *lnfname = NULL;
    union {
        uint8_t b[8];
        uint64_t q;
    } xdata;
    size_t asize, amax;
    uint64_t zeropad = 0;
    int64_t addrval;
    int32_t fixseg;             /* Segment for which to produce fixed data */

    if (!data->size)
        return;                 /* Nothing to do */

    /*
     * Convert addresses to RAWDATA if possible
     * XXX: not all backends want this for global symbols!!!!
     */
    switch (data->type) {
    case OUT_ADDRESS:
        addrval = data->toffset;
        fixseg = NO_SEG;        /* Absolute address is fixed data */
        goto address;

    case OUT_RELADDR:
        addrval = data->toffset - data->relbase;
        fixseg = data->segment; /* Our own segment is fixed data */
        goto address;

    address:
        nasm_assert(data->size <= 8);
        asize = data->size;
        amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
        if ((ofmt->flags & OFMT_KEEP_ADDR) == 0 && data->tsegment == fixseg &&
            data->twrt == NO_SEG) {
            warn_overflow_out(addrval, asize, data->sign);
            xdata.q = cpu_to_le64(addrval);
            data->data = xdata.b;
            data->type = OUT_RAWDATA;
            asize = amax = 0;   /* No longer an address */
        }
        break;

    case OUT_SEGMENT:
        nasm_assert(data->size <= 8);
        asize = data->size;
        amax = 2;
        break;

    default:
        asize = amax = 0;       /* Not an address */
        break;
    }

    /*
     * this call to src_get determines when we call the
     * debug-format-specific "linenum" function
     * it updates lineno and lnfname to the current values
     * returning 0 if "same as last time", -2 if lnfname
     * changed, and the amount by which lineno changed,
     * if it did. thus, these variables must be static
     */

    if (src_get(&lineno, &lnfname))
        dfmt->linenum(lnfname, lineno, data->segment);

    if (asize > amax) {
        if (data->type == OUT_RELADDR || data->sign == OUT_SIGNED) {
            nasm_nonfatal("%u-bit signed relocation unsupported by output format %s",
                          (unsigned int)(asize << 3), ofmt->shortname);
        } else {
            /*!
             *!zext-reloc [on] relocation zero-extended to match output format
             *!  warns that a relocation has been zero-extended due
             *!  to limitations in the output format.
             */
            nasm_warn(WARN_ZEXT_RELOC,
                      "%u-bit %s relocation zero-extended from %u bits",
                      (unsigned int)(asize << 3),
                      data->type == OUT_SEGMENT ? "segment" : "unsigned",
                      (unsigned int)(amax << 3));
        }
        zeropad = data->size - amax;
        data->size = amax;
    }
    lfmt->output(data);

    if (likely(data->segment != NO_SEG)) {
        ofmt->output(data);
    } else {
        /* Outputting to ABSOLUTE section - only reserve is permitted */
        if (data->type != OUT_RESERVE)
            nasm_nonfatal("attempt to assemble code in [ABSOLUTE] space");
        /* No need to push to the backend */
    }

    data->offset += data->size;
    data->insoffs += data->size;

    if (zeropad) {
        data->type = OUT_ZERODATA;
        data->size = zeropad;
        lfmt->output(data);
        ofmt->output(data);
        data->offset += zeropad;
        data->insoffs += zeropad;
        data->size += zeropad;  /* Restore original size value */
    }
}

static inline void out_rawdata(struct out_data *data, const void *rawdata,
                               size_t size)
{
    data->type = OUT_RAWDATA;
    data->data = rawdata;
    data->size = size;
    out(data);
}

static void out_rawbyte(struct out_data *data, uint8_t byte)
{
    data->type = OUT_RAWDATA;
    data->data = &byte;
    data->size = 1;
    out(data);
}

static inline void out_reserve(struct out_data *data, uint64_t size)
{
    data->type = OUT_RESERVE;
    data->size = size;
    out(data);
}

static void out_segment(struct out_data *data, const struct operand *opx)
{
    if (opx->opflags & OPFLAG_RELATIVE)
        nasm_nonfatal("segment references cannot be relative");

    data->type = OUT_SEGMENT;
    data->sign = OUT_UNSIGNED;
    data->size = 2;
    data->toffset = opx->offset;
    data->tsegment = ofmt->segbase(opx->segment | 1);
    data->twrt = opx->wrt;
    out(data);
}

static void out_imm(struct out_data *data, const struct operand *opx,
                    int size, enum out_sign sign)
{
    if (opx->segment != NO_SEG && (opx->segment & 1)) {
        /*
         * This is actually a segment reference, but eval() has
         * already called ofmt->segbase() for us.  Sigh.
         */
        if (size < 2)
            nasm_nonfatal("segment reference must be 16 bits");

        data->type = OUT_SEGMENT;
    } else {
        data->type = (opx->opflags & OPFLAG_RELATIVE)
            ? OUT_RELADDR : OUT_ADDRESS;
    }
    data->sign = sign;
    data->toffset = opx->offset;
    data->tsegment = opx->segment;
    data->twrt = opx->wrt;
    /*
     * XXX: improve this if at some point in the future we can
     * distinguish the subtrahend in expressions like [foo - bar]
     * where bar is a symbol in the current segment.  However, at the
     * current point, if OPFLAG_RELATIVE is set that subtraction has
     * already occurred.
     */
    data->relbase = 0;
    data->size = size;
    out(data);
}

static void out_reladdr(struct out_data *data, const struct operand *opx,
                        int size)
{
    if (opx->opflags & OPFLAG_RELATIVE)
        nasm_nonfatal("invalid use of self-relative expression");

    data->type = OUT_RELADDR;
    data->sign = OUT_SIGNED;
    data->size = size;
    data->toffset = opx->offset;
    data->tsegment = opx->segment;
    data->twrt = opx->wrt;
    data->relbase = data->offset + (data->inslen - data->insoffs);
    out(data);
}

static bool jmp_match(int32_t segment, int64_t offset, int bits,
                      insn * ins, const struct itemplate *temp)
{
    int64_t isize;
    const uint8_t *code = temp->code;
    uint8_t c = code[0];
    bool is_byte;

    if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
        return false;
    if (!optimizing.level || (optimizing.flag & OPTIM_DISABLE_JMP_MATCH))
        return false;
    if (optimizing.level < 0 && c == 0371)
        return false;

    isize = calcsize(segment, offset, bits, ins, temp);

    if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
        /* Be optimistic in pass 1 */
        return true;

    if (ins->oprs[0].segment != segment)
        return false;

    isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
    is_byte = (isize >= -128 && isize <= 127);    /* is it byte size? */

    if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
        /* jmp short (opcode eb) cannot be used with bnd prefix. */
        ins->prefixes[PPS_REP] = P_none;
        /*!
         *!bnd [on] invalid BND prefixes
         *!  warns about ineffective use of the \c{BND} prefix when the
         *!  \c{JMP} instruction is converted to the \c{SHORT} form.
         *!  This should be extremely rare since the short \c{JMP} only
         *!  is applicable to jumps inside the same module, but if
         *!  it is legitimate, it may be necessary to use
         *!  \c{BND JMP DWORD}...
         */
        nasm_warn(WARN_BND | ERR_PASS2,
                  "jmp short does not init bnd regs - bnd prefix dropped.");
    }

    return is_byte;
}

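/*
 * Worked example of the delta test in jmp_match() (hypothetical numbers):
 * for a Jcc whose short form is 2 bytes long and which is assembled at
 * offset 0x100, a target up to 0x100 + 2 + 127 = 0x181 still satisfies
 * the byte jump criteria; anything further away makes is_byte false and
 * the \370/\371 template is rejected in favour of a longer encoding.
 */
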
/* This is totally just a wild guess what is reasonable... */
#define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)

int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
{
    struct out_data data;
    const struct itemplate *temp;
    enum match_result m;
    int64_t wsize;              /* size for DB etc. */

    nasm_zero(data);
    data.offset = start;
    data.segment = segment;
    data.itemp = NULL;
    data.bits = bits;

    wsize = db_bytes(instruction->opcode);
    if (wsize == -1)
        return 0;

    if (wsize) {
        extop *e;

        list_for_each(e, instruction->eops) {
            if (e->type == EOT_DB_NUMBER) {
                if (wsize > 8) {
                    nasm_nonfatal("integer supplied to a DT,DO,DY or DZ");
                } else {
                    data.insoffs = 0;
                    data.inslen = data.size = wsize;
                    data.toffset = e->offset;
                    data.twrt = e->wrt;
                    data.relbase = 0;
                    if (e->segment != NO_SEG && (e->segment & 1)) {
                        data.tsegment = e->segment;
                        data.type = OUT_SEGMENT;
                        data.sign = OUT_UNSIGNED;
                    } else {
                        data.tsegment = e->segment;
                        data.type = e->relative ? OUT_RELADDR : OUT_ADDRESS;
                        data.sign = OUT_WRAP;
                    }
                    out(&data);
                }
            } else if (e->type == EOT_DB_STRING ||
                       e->type == EOT_DB_STRING_FREE) {
                int align = e->stringlen % wsize;
                if (align)
                    align = wsize - align;

                data.insoffs = 0;
                data.inslen = e->stringlen + align;

                out_rawdata(&data, e->stringval, e->stringlen);
                out_rawdata(&data, zero_buffer, align);
            }
        }
    } else if (instruction->opcode == I_INCBIN) {
        const char *fname = instruction->eops->stringval;
        FILE *fp;
        size_t t = instruction->times; /* INCBIN handles TIMES by itself */
        off_t base = 0;
        off_t len;
        const void *map = NULL;
        char *buf = NULL;
        size_t blk = 0;         /* Buffered I/O block size */
        size_t m = 0;           /* Bytes last read */

        if (!t)
            goto done;

        fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
        if (!fp) {
            nasm_nonfatal("`incbin': unable to open file `%s'",
                          fname);
            goto done;
        }

        len = nasm_file_size(fp);

        if (len == (off_t)-1) {
            nasm_nonfatal("`incbin': unable to get length of file `%s'",
                          fname);
            goto close_done;
        }

        if (instruction->eops->next) {
            base = instruction->eops->next->offset;
            if (base >= len) {
                len = 0;
            } else {
                len -= base;
                if (instruction->eops->next->next &&
                    len > (off_t)instruction->eops->next->next->offset)
                    len = (off_t)instruction->eops->next->next->offset;
            }
        }

        lfmt->set_offset(data.offset);
        lfmt->uplevel(LIST_INCBIN);

        if (!len)
            goto end_incbin;

        /* Try to map file data */
        map = nasm_map_file(fp, base, len);
        if (!map) {
            blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
            buf = nasm_malloc(blk);
        }

        while (t--) {
            /*
             * Consider these irrelevant for INCBIN, since it is fully
             * possible that these might be (way) bigger than an int
             * can hold; there is, however, no reason to widen these
             * types just for INCBIN.  data.inslen == 0 signals to the
             * backend that these fields are meaningless, if at all
             * needed.
             */
            data.insoffs = 0;
            data.inslen = 0;

            if (map) {
                out_rawdata(&data, map, len);
            } else if ((off_t)m == len) {
                out_rawdata(&data, buf, len);
            } else {
                off_t l = len;

                if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
                    nasm_nonfatal("`incbin': unable to seek on file `%s'",
                                  fname);
                    goto end_incbin;
                }
                while (l > 0) {
                    m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
                    if (!m || feof(fp)) {
                        /*
                         * This shouldn't happen unless the file
                         * actually changes while we are reading
                         * it.
                         */
                        nasm_nonfatal("`incbin': unexpected EOF while"
                                      " reading file `%s'", fname);
                        goto end_incbin;
                    }
                    out_rawdata(&data, buf, m);
                    l -= m;
                }
            }
        }
    end_incbin:
        lfmt->downlevel(LIST_INCBIN);
        if (instruction->times > 1) {
            lfmt->uplevel(LIST_TIMES);
            lfmt->downlevel(LIST_TIMES);
        }
        if (ferror(fp)) {
            nasm_nonfatal("`incbin': error while"
                          " reading file `%s'", fname);
        }
    close_done:
        if (buf)
            nasm_free(buf);
        if (map)
            nasm_unmap_file(map, len);
        fclose(fp);
    done:
        instruction->times = 1; /* Tell the upper layer not to iterate */
        ;
    } else {
        /* "Real" instruction */

        /* Check to see if we need an address-size prefix */
        add_asp(instruction, bits);

        m = find_match(&temp, instruction, data.segment, data.offset, bits);

        if (m == MOK_GOOD) {
            /* Matches! */
            int64_t insn_size = calcsize(data.segment, data.offset,
                                         bits, instruction, temp);
            nasm_assert(insn_size >= 0);

            data.itemp = temp;
            data.bits = bits;
            data.insoffs = 0;
            data.inslen = insn_size;

            gencode(&data, instruction);
            nasm_assert(data.insoffs == insn_size);
        } else {
            /* No match */
            switch (m) {
            case MERR_OPSIZEMISSING:
                nasm_nonfatal("operation size not specified");
                break;
            case MERR_OPSIZEMISMATCH:
                nasm_nonfatal("mismatch in operand sizes");
                break;
            case MERR_BRNOTHERE:
                nasm_nonfatal("broadcast not permitted on this operand");
                break;
            case MERR_BRNUMMISMATCH:
                nasm_nonfatal("mismatch in the number of broadcasting elements");
                break;
            case MERR_MASKNOTHERE:
                nasm_nonfatal("mask not permitted on this operand");
                break;
            case MERR_DECONOTHERE:
                nasm_nonfatal("unsupported mode decorator for instruction");
                break;
            case MERR_BADCPU:
                nasm_nonfatal("no instruction for this cpu level");
                break;
            case MERR_BADMODE:
                nasm_nonfatal("instruction not supported in %d-bit mode", bits);
                break;
            case MERR_ENCMISMATCH:
                nasm_nonfatal("specific encoding scheme not available");
                break;
            case MERR_BADBND:
                nasm_nonfatal("bnd prefix is not allowed");
                break;
            case MERR_BADREPNE:
                nasm_nonfatal("%s prefix is not allowed",
                              (has_prefix(instruction, PPS_REP, P_REPNE) ?
                               "repne" : "repnz"));
                break;
            case MERR_REGSETSIZE:
                nasm_nonfatal("invalid register set size");
                break;
            case MERR_REGSET:
                nasm_nonfatal("register set not valid for operand");
                break;
            default:
                nasm_nonfatal("invalid combination of opcode and operands");
                break;
            }

            instruction->times = 1; /* Avoid repeated error messages */
        }
    }
    return data.offset - start;
}

int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
{
    const struct itemplate *temp;
    enum match_result m;

    if (instruction->opcode == I_none)
        return 0;

    if (opcode_is_db(instruction->opcode)) {
        extop *e;
        int32_t isize, osize, wsize;

        isize = 0;
        wsize = db_bytes(instruction->opcode);
        nasm_assert(wsize > 0);

        list_for_each(e, instruction->eops) {
            int32_t align;

            osize = 0;
            if (e->type == EOT_DB_NUMBER) {
                osize = 1;
                warn_overflow_const(e->offset, wsize);
            } else if (e->type == EOT_DB_STRING ||
                       e->type == EOT_DB_STRING_FREE)
                osize = e->stringlen;

            align = (-osize) % wsize;
            if (align < 0)
                align += wsize;
            isize += osize + align;
        }
        return isize;
    }

    if (instruction->opcode == I_INCBIN) {
        const char *fname = instruction->eops->stringval;
        off_t len;

        len = nasm_file_size_by_path(fname);
        if (len == (off_t)-1) {
            nasm_nonfatal("`incbin': unable to get length of file `%s'",
                          fname);
            return 0;
        }

        if (instruction->eops->next) {
            if (len <= (off_t)instruction->eops->next->offset) {
                len = 0;
            } else {
                len -= instruction->eops->next->offset;
                if (instruction->eops->next->next &&
                    len > (off_t)instruction->eops->next->next->offset) {
                    len = (off_t)instruction->eops->next->next->offset;
                }
            }
        }

        len *= instruction->times;
        instruction->times = 1; /* Tell the upper layer to not iterate */

        return len;
    }

    /* Check to see if we need an address-size prefix */
    add_asp(instruction, bits);

    m = find_match(&temp, instruction, segment, offset, bits);
    if (m == MOK_GOOD) {
        /* we've matched an instruction. */
        return calcsize(segment, offset, bits, instruction, temp);
    } else {
        return -1;              /* didn't match any instruction */
    }
}

static void bad_hle_warn(const insn * ins, uint8_t hleok)
{
    enum prefixes rep_pfx = ins->prefixes[PPS_REP];
    enum whatwarn { w_none, w_lock, w_inval } ww;
    static const enum whatwarn warn[2][4] =
    {
        { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
        { w_inval, w_none,  w_none, w_lock }, /* XRELEASE */
    };
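    /*
     * The table is indexed as warn[n][hleok]: n is 0 for XACQUIRE and 1
     * for XRELEASE (computed below), and hleok comes from the \271..\273
     * bytecodes (c & 3 in calcsize), so column 0 means the template has
     * no HLE bytecode at all and columns 1..3 correspond to hlexr, hlenl
     * and hle respectively.
     */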
    unsigned int n;

    n = (unsigned int)rep_pfx - P_XACQUIRE;
    if (n > 1)
        return;                 /* Not XACQUIRE/XRELEASE */

    ww = warn[n][hleok];
    if (!is_class(MEMORY, ins->oprs[0].type))
        ww = w_inval;           /* HLE requires operand 0 to be memory */

    /*!
     *!hle [on] invalid HLE prefixes
     *!  warns about invalid use of the HLE \c{XACQUIRE} or \c{XRELEASE}
     *!  prefixes.
     */
    switch (ww) {
    case w_none:
        break;

    case w_lock:
        if (ins->prefixes[PPS_LOCK] != P_LOCK) {
            nasm_warn(WARN_HLE | ERR_PASS2,
                      "%s with this instruction requires lock",
                      prefix_name(rep_pfx));
        }
        break;

    case w_inval:
        nasm_warn(WARN_HLE | ERR_PASS2,
                  "%s invalid with this instruction",
                  prefix_name(rep_pfx));
        break;
    }
}

/* Common construct */
#define case3(x) case (x): case (x)+1: case (x)+2
#define case4(x) case3(x): case (x)+3
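/*
 * For example, case4(010) covers the four consecutive byte codes 010,
 * 011, 012 and 013, which is how the per-operand (0..3) variants of a
 * bytecode are handled together in the switches below.
 */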

static int64_t calcsize(int32_t segment, int64_t offset, int bits,
                        insn * ins, const struct itemplate *temp)
{
    const uint8_t *codes = temp->code;
    int64_t length = 0;
    uint8_t c;
    int rex_mask = ~0;
    int op1, op2;
    struct operand *opx;
    uint8_t opex = 0;
    enum ea_type eat;
    uint8_t hleok = 0;
    bool lockcheck = true;
    enum reg_enum mib_index = R_none;  /* For a separate index MIB reg form */
    const char *errmsg;

    ins->rex = 0;               /* Ensure REX is reset */
    eat = EA_SCALAR;            /* Expect a scalar EA */
    memset(ins->evex_p, 0, 3);  /* Ensure EVEX is reset */

    if (ins->prefixes[PPS_OSIZE] == P_O64)
        ins->rex |= REX_W;

    (void)segment;              /* Don't warn that this parameter is unused */
    (void)offset;               /* Don't warn that this parameter is unused */

    while (*codes) {
        c = *codes++;
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0;               /* For the next iteration */

        switch (c) {
        case4(01):
            codes += c, length += c;
            break;

        case3(05):
            opex = c;
            break;

        case4(010):
            ins->rex |=
                op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
            codes++, length++;
            break;

        case4(014):
            /* this is an index reg of MIB operand */
            mib_index = opx->basereg;
            break;

        case4(020):
        case4(024):
            length++;
            break;

        case4(030):
            length += 2;
            break;

        case4(034):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(040):
            length += 4;
            break;

        case4(044):
            length += ins->addr_size >> 3;
            break;

        case4(050):
            length++;
            break;

        case4(054):
            length += 8; /* MOV reg64/imm */
            break;

        case4(060):
            length += 2;
            break;

        case4(064):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(070):
            length += 4;
            break;

        case4(074):
            length += 2;
            break;

        case 0172:
        case 0173:
            codes++;
            length++;
            break;

        case4(0174):
            length++;
            break;

        case4(0240):
            ins->rex |= REX_EV;
            ins->vexreg = regval(opx);
            ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case 0250:
            ins->rex |= REX_EV;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case4(0254):
            length += 4;
            break;

        case4(0260):
            ins->rex |= REX_V;
            ins->vexreg = regval(opx);
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case 0270:
            ins->rex |= REX_V;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case3(0271):
            hleok = c & 3;
            break;

        case4(0274):
            length++;
            break;

        case4(0300):
            break;

        case 0310:
            if (bits == 64)
                return -1;
            length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
            break;

        case 0311:
            length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
            break;

        case 0312:
            break;

        case 0313:
            if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
                has_prefix(ins, PPS_ASIZE, P_A32))
                return -1;
            break;

        case4(0314):
            break;

        case 0320:
        {
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O16)
                break;
            if (pfx != P_none)
                nasm_warn(ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O16;
            break;
        }

        case 0321:
        {
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O32)
                break;
            if (pfx != P_none)
                nasm_warn(ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O32;
            break;
        }

        case 0322:
            break;

        case 0323:
            rex_mask &= ~REX_W;
            break;

        case 0324:
            ins->rex |= REX_W;
            break;

        case 0325:
            ins->rex |= REX_NH;
            break;

        case 0326:
            break;

        case 0330:
            codes++, length++;
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            length++;
            break;

        case 0334:
            ins->rex |= REX_L;
            break;

        case 0335:
            break;

        case 0336:
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REP;
            break;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001196 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001197
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001198 case 0337:
H. Peter Anvin10da41e2012-02-24 20:57:04 -08001199 if (!ins->prefixes[PPS_REP])
1200 ins->prefixes[PPS_REP] = P_REPNE;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001201 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001202
H. Peter Anvine2c80182005-01-15 22:15:51 +00001203 case 0340:
H. Peter Anvin164d2462017-02-20 02:39:56 -08001204 if (!absolute_op(&ins->oprs[0]))
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001205 nasm_nonfatal("attempt to reserve non-constant"
1206 " quantity of BSS space");
H. Peter Anvinc5d40b32016-10-03 22:18:31 -07001207 else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001208 nasm_warn(ERR_PASS1, "forward reference in RESx "
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001209 "can have unpredictable results");
H. Peter Anvine2c80182005-01-15 22:15:51 +00001210 else
H. Peter Anvin428fd672007-11-15 10:25:52 -08001211 length += ins->oprs[0].offset;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001212 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001213
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001214 case 0341:
1215 if (!ins->prefixes[PPS_WAIT])
1216 ins->prefixes[PPS_WAIT] = P_WAIT;
1217 break;
H. Peter Anvinc2acf7b2009-02-21 18:22:56 -08001218
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001219 case 0360:
1220 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001221
Ben Rudiak-Gould94ba02f2013-03-10 21:46:12 +04001222 case 0361:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001223 length++;
1224 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001225
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001226 case 0364:
1227 case 0365:
1228 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001229
Keith Kanios48af1772007-08-17 07:37:52 +00001230 case 0366:
H. Peter Anvin62cb6062007-09-11 22:44:03 +00001231 case 0367:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001232 length++;
1233 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001234
Jin Kyu Songb4e1ae12013-11-08 13:31:58 -08001235 case 0370:
1236 case 0371:
H. Peter Anvine2c80182005-01-15 22:15:51 +00001237 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001238
H. Peter Anvine2c80182005-01-15 22:15:51 +00001239 case 0373:
1240 length++;
1241 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001242
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07001243 case 0374:
1244 eat = EA_XMMVSIB;
1245 break;
1246
1247 case 0375:
1248 eat = EA_YMMVSIB;
1249 break;
1250
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001251 case 0376:
1252 eat = EA_ZMMVSIB;
1253 break;
1254
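        /*
         * Effective-address (ModRM) operands: hand the memory operand
         * to process_ea() and add the size of the resulting encoding.
         */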
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001255 case4(0100):
1256 case4(0110):
1257 case4(0120):
1258 case4(0130):
1259 case4(0200):
1260 case4(0204):
1261 case4(0210):
1262 case4(0214):
1263 case4(0220):
1264 case4(0224):
1265 case4(0230):
1266 case4(0234):
1267 {
H. Peter Anvine2c80182005-01-15 22:15:51 +00001268 ea ea_data;
Keith Kaniosb7a89542007-04-12 02:40:54 +00001269 int rfield;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001270 opflags_t rflags;
1271 struct operand *opy = &ins->oprs[op2];
Jin Kyu Songe3a06b92013-08-28 19:15:23 -07001272 struct operand *op_er_sae;
H. Peter Anvinae64c9d2008-10-25 00:41:00 -07001273
Keith Kaniosb7a89542007-04-12 02:40:54 +00001274 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
H. Peter Anvin70653092007-10-19 14:42:29 -07001275
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001276 if (c <= 0177) {
1277 /* pick rfield from operand b (opx) */
1278 rflags = regflag(opx);
1279 rfield = nasm_regvals[opx->basereg];
1280 } else {
1281 rflags = 0;
1282 rfield = c & 7;
1283 }
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001284
Jin Kyu Songe3a06b92013-08-28 19:15:23 -07001285 /* EVEX.b1 : evex_brerop contains the operand position */
1286 op_er_sae = (ins->evex_brerop >= 0 ?
1287 &ins->oprs[ins->evex_brerop] : NULL);
1288
Jin Kyu Songc47ef942013-08-30 18:10:35 -07001289 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1290 /* set EVEX.b */
1291 ins->evex_p[2] |= EVEX_P2B;
1292 if (op_er_sae->decoflags & ER) {
1293 /* set EVEX.RC (rounding control) */
1294 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1295 & EVEX_P2RC;
1296 }
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001297 } else {
1298 /* set EVEX.L'L (vector length) */
1299 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
Jin Kyu Song5f3bfee2013-11-20 15:32:52 -08001300 ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
Jin Kyu Songc47ef942013-08-30 18:10:35 -07001301 if (opy->decoflags & BRDCAST_MASK) {
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001302 /* set EVEX.b */
1303 ins->evex_p[2] |= EVEX_P2B;
1304 }
1305 }
1306
Jin Kyu Song4360ba22013-12-10 16:24:45 -08001307 if (itemp_has(temp, IF_MIB)) {
1308 opy->eaflags |= EAF_MIB;
1309 /*
1310 * If the separate (ICC-style) form of MIB is used,
1311 * the index register info is merged into the memory operand.

1312 */
1313 if (mib_index != R_none) {
1314 opy->indexreg = mib_index;
1315 opy->scale = 1;
1316 opy->hintbase = mib_index;
1317 opy->hinttype = EAH_NOTBASE;
1318 }
Jin Kyu Song3b653232013-11-08 11:41:12 -08001319 }
1320
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001321 if (process_ea(opy, &ea_data, bits,
H. Peter Anvin8f622462017-04-02 19:02:29 -07001322 rfield, rflags, ins, &errmsg) != eat) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001323 nasm_nonfatal("%s", errmsg);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001324 return -1;
Keith Kaniosb7a89542007-04-12 02:40:54 +00001325 } else {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001326 ins->rex |= ea_data.rex;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001327 length += ea_data.size;
Keith Kaniosb7a89542007-04-12 02:40:54 +00001328 }
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001329 }
1330 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001331
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001332 default:
H. Peter Anvinc5136902018-06-15 18:20:17 -07001333 nasm_panic("internal instruction table corrupt"
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001334 ": instruction code \\%o (0x%02X) given", c, c);
1335 break;
1336 }
H. Peter Anvin839eca22007-10-29 23:12:47 -07001337 }
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00001338
H. Peter Anvin0db11e22007-04-17 20:23:11 +00001339 ins->rex &= rex_mask;
H. Peter Anvin70653092007-10-19 14:42:29 -07001340
H. Peter Anvin9472dab2009-06-24 21:38:29 -07001341 if (ins->rex & REX_NH) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001342 if (ins->rex & REX_H) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001343 nasm_nonfatal("instruction cannot use high registers");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001344 return -1;
1345 }
1346 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
H. Peter Anvin9472dab2009-06-24 21:38:29 -07001347 }
1348
H. Peter Anvin621a69a2013-11-28 12:11:24 -08001349 switch (ins->prefixes[PPS_VEX]) {
1350 case P_EVEX:
1351 if (!(ins->rex & REX_EV))
1352 return -1;
1353 break;
1354 case P_VEX3:
1355 case P_VEX2:
1356 if (!(ins->rex & REX_V))
1357 return -1;
1358 break;
1359 default:
1360 break;
1361 }
1362
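    /*
     * VEX/EVEX-encoded instruction: check that the operands are legal
     * for the current mode, then add the prefix size -- 4 bytes for
     * EVEX, 3 bytes for the C4/8F VEX form, or 2 bytes when the short
     * C5 form suffices.
     */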
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001363 if (ins->rex & (REX_V | REX_EV)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001364 int bad32 = REX_R|REX_W|REX_X|REX_B;
H. Peter Anvind85d2502008-05-04 17:53:31 -07001365
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001366 if (ins->rex & REX_H) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001367 nasm_nonfatal("cannot use high register in AVX instruction");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001368 return -1;
1369 }
H. Peter Anvin421059c2010-08-16 14:56:33 -07001370 switch (ins->vex_wlp & 060) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001371 case 000:
H. Peter Anvin229fa6c2010-08-16 15:21:48 -07001372 case 040:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001373 ins->rex &= ~REX_W;
1374 break;
H. Peter Anvin229fa6c2010-08-16 15:21:48 -07001375 case 020:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001376 ins->rex |= REX_W;
1377 bad32 &= ~REX_W;
1378 break;
H. Peter Anvin421059c2010-08-16 14:56:33 -07001379 case 060:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001380 /* Follow REX_W */
1381 break;
1382 }
H. Peter Anvind85d2502008-05-04 17:53:31 -07001383
H. Peter Anvinfc561202011-07-07 16:58:22 -07001384 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001385 nasm_nonfatal("invalid operands in non-64-bit mode");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001386 return -1;
Jin Kyu Song66c61922013-08-26 20:28:43 -07001387 } else if (!(ins->rex & REX_EV) &&
1388 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001389 nasm_nonfatal("invalid high-16 register in non-AVX-512");
Jin Kyu Song66c61922013-08-26 20:28:43 -07001390 return -1;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001391 }
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001392 if (ins->rex & REX_EV)
1393 length += 4;
H. Peter Anvin621a69a2013-11-28 12:11:24 -08001394 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1395 ins->prefixes[PPS_VEX] == P_VEX3)
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001396 length += 3;
1397 else
1398 length += 2;
Cyrill Gorcunov5b144752014-05-06 01:50:22 +04001399 } else if (ins->rex & REX_MASK) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001400 if (ins->rex & REX_H) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001401 nasm_nonfatal("cannot use high register in rex instruction");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001402 return -1;
1403 } else if (bits == 64) {
1404 length++;
1405 } else if ((ins->rex & REX_L) &&
1406 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
H. Peter Anvina7ecf262018-02-06 14:43:07 -08001407 iflag_cpu_level_ok(&cpu, IF_X86_64)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001408 /* LOCK-as-REX.R */
H. Peter Anvin10da41e2012-02-24 20:57:04 -08001409 assert_no_prefix(ins, PPS_LOCK);
H. Peter Anvin8cc8a1d2012-02-25 11:11:42 -08001410 lockcheck = false; /* Already errored, no need for warning */
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001411 length++;
1412 } else {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001413 nasm_nonfatal("invalid operands in non-64-bit mode");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001414 return -1;
1415 }
Keith Kaniosb7a89542007-04-12 02:40:54 +00001416 }
H. Peter Anvin8cc8a1d2012-02-25 11:11:42 -08001417
1418 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
Cyrill Gorcunov08359152013-11-09 22:16:11 +04001419 (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
H. Peter Anvin (Intel)723ab482018-12-13 21:53:31 -08001420 /*!
H. Peter Anvin (Intel)be99ebd2018-12-13 22:12:37 -08001421 *!lock [on] LOCK prefix on unlockable instructions
H. Peter Anvin (Intel)723ab482018-12-13 21:53:31 -08001422 *! warns about \c{LOCK} prefixes on unlockable instructions.
1423 */
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001424 nasm_warn(WARN_LOCK | ERR_PASS2 , "instruction is not lockable");
H. Peter Anvin8cc8a1d2012-02-25 11:11:42 -08001425 }
1426
H. Peter Anvin4ecd5d72012-02-24 21:51:46 -08001427 bad_hle_warn(ins, hleok);
Keith Kaniosb7a89542007-04-12 02:40:54 +00001428
Jin Kyu Songb287ff02013-12-04 20:05:55 -08001429 /*
1430 * When the BND prefix is enabled by the DEFAULT directive,
1431 * it is added to every appropriate instruction line
1432 * unless it is overridden by a NOBND prefix.
1433 */
1434 if (globalbnd &&
1435 (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
1436 ins->prefixes[PPS_REP] = P_BND;
1437
H. Peter Anvina77692b2016-09-20 14:04:33 -07001438 /*
1439 * Add length of legacy prefixes
1440 */
1441 length += emit_prefix(NULL, bits, ins);
1442
H. Peter Anvin0db11e22007-04-17 20:23:11 +00001443 return length;
1444}
Keith Kaniosb7a89542007-04-12 02:40:54 +00001445
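/*
 * Emit the REX prefix byte if one is needed and has not already been
 * emitted; only meaningful in 64-bit mode, and skipped for VEX/EVEX
 * encodings, which carry the equivalent bits in their own prefix.
 */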
H. Peter Anvina77692b2016-09-20 14:04:33 -07001446static inline void emit_rex(struct out_data *data, insn *ins)
Cyrill Gorcunov98238762013-03-02 02:48:23 +04001447{
H. Peter Anvina77692b2016-09-20 14:04:33 -07001448 if (data->bits == 64) {
H. Peter Anvin89f78f52014-05-21 08:30:40 -07001449 if ((ins->rex & REX_MASK) &&
H. Peter Anvin0a9250c2014-05-21 08:19:16 -07001450 !(ins->rex & (REX_V | REX_EV)) &&
1451 !ins->rex_done) {
H. Peter Anvina77692b2016-09-20 14:04:33 -07001452 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1453 out_rawbyte(data, rex);
H. Peter Anvin0a9250c2014-05-21 08:19:16 -07001454 ins->rex_done = true;
Cyrill Gorcunov98238762013-03-02 02:48:23 +04001455 }
H. Peter Anvin3df97a72007-05-30 03:25:21 +00001456 }
Cyrill Gorcunov98238762013-03-02 02:48:23 +04001457}
1458
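/*
 * Emit the legacy (non-REX) prefix bytes for an instruction. When
 * data is NULL, nothing is written and only the number of prefix
 * bytes is returned, which is how the length computation above
 * accounts for prefixes.
 */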
H. Peter Anvina77692b2016-09-20 14:04:33 -07001459static int emit_prefix(struct out_data *data, const int bits, insn *ins)
1460{
1461 int bytes = 0;
1462 int j;
1463
1464 for (j = 0; j < MAXPREFIX; j++) {
1465 uint8_t c = 0;
1466 switch (ins->prefixes[j]) {
1467 case P_WAIT:
1468 c = 0x9B;
1469 break;
1470 case P_LOCK:
1471 c = 0xF0;
1472 break;
1473 case P_REPNE:
1474 case P_REPNZ:
1475 case P_XACQUIRE:
1476 case P_BND:
1477 c = 0xF2;
1478 break;
1479 case P_REPE:
1480 case P_REPZ:
1481 case P_REP:
1482 case P_XRELEASE:
1483 c = 0xF3;
1484 break;
1485 case R_CS:
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001486 if (bits == 64)
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001487 nasm_warn(ERR_PASS2, "cs segment base generated, "
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001488 "but will be ignored in 64-bit mode");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001489 c = 0x2E;
1490 break;
1491 case R_DS:
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001492 if (bits == 64)
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001493 nasm_warn(ERR_PASS2, "ds segment base generated, "
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001494 "but will be ignored in 64-bit mode");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001495 c = 0x3E;
1496 break;
1497 case R_ES:
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001498 if (bits == 64)
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001499 nasm_warn(ERR_PASS2, "es segment base generated, "
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001500 "but will be ignored in 64-bit mode");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001501 c = 0x26;
1502 break;
1503 case R_FS:
1504 c = 0x64;
1505 break;
1506 case R_GS:
1507 c = 0x65;
1508 break;
1509 case R_SS:
1510 if (bits == 64) {
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001511 nasm_warn(ERR_PASS2, "ss segment base generated, "
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001512 "but will be ignored in 64-bit mode");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001513 }
1514 c = 0x36;
1515 break;
1516 case R_SEGR6:
1517 case R_SEGR7:
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001518 nasm_nonfatal("segr6 and segr7 cannot be used as prefixes");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001519 break;
1520 case P_A16:
1521 if (bits == 64) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001522 nasm_nonfatal("16-bit addressing is not supported "
1523 "in 64-bit mode");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001524 } else if (bits != 16)
1525 c = 0x67;
1526 break;
1527 case P_A32:
1528 if (bits != 32)
1529 c = 0x67;
1530 break;
1531 case P_A64:
1532 if (bits != 64) {
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001533 nasm_nonfatal("64-bit addressing is only supported "
1534 "in 64-bit mode");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001535 }
1536 break;
1537 case P_ASP:
1538 c = 0x67;
1539 break;
1540 case P_O16:
1541 if (bits != 16)
1542 c = 0x66;
1543 break;
1544 case P_O32:
1545 if (bits == 16)
1546 c = 0x66;
1547 break;
1548 case P_O64:
1549 /* REX.W */
1550 break;
1551 case P_OSP:
1552 c = 0x66;
1553 break;
1554 case P_EVEX:
1555 case P_VEX3:
1556 case P_VEX2:
1557 case P_NOBND:
1558 case P_none:
1559 break;
1560 default:
H. Peter Anvinc5136902018-06-15 18:20:17 -07001561 nasm_panic("invalid instruction prefix");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001562 }
1563 if (c) {
1564 if (data)
1565 out_rawbyte(data, c);
1566 bytes++;
1567 }
1568 }
1569 return bytes;
1570}
1571
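/*
 * Generate the machine code for one instruction: emit the legacy
 * prefixes, then walk the template bytecode and output each encoded
 * component.
 */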
1572static void gencode(struct out_data *data, insn *ins)
H. Peter Anvineba20a72002-04-30 20:53:55 +00001573{
Keith Kaniosb7a89542007-04-12 02:40:54 +00001574 uint8_t c;
1575 uint8_t bytes[4];
Charles Crayne1f8bc4c2007-11-06 18:27:23 -08001576 int64_t size;
H. Peter Anvindcffe4b2008-10-10 22:10:31 -07001577 int op1, op2;
H. Peter Anvin839eca22007-10-29 23:12:47 -07001578 struct operand *opx;
H. Peter Anvina77692b2016-09-20 14:04:33 -07001579 const uint8_t *codes = data->itemp->code;
H. Peter Anvindcffe4b2008-10-10 22:10:31 -07001580 uint8_t opex = 0;
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07001581 enum ea_type eat = EA_SCALAR;
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001582 int r;
H. Peter Anvina77692b2016-09-20 14:04:33 -07001583 const int bits = data->bits;
H. Peter Anvin8f622462017-04-02 19:02:29 -07001584 const char *errmsg;
H. Peter Anvin70653092007-10-19 14:42:29 -07001585
H. Peter Anvin0a9250c2014-05-21 08:19:16 -07001586 ins->rex_done = false;
1587
H. Peter Anvina77692b2016-09-20 14:04:33 -07001588 emit_prefix(data, bits, ins);
1589
H. Peter Anvin839eca22007-10-29 23:12:47 -07001590 while (*codes) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001591 c = *codes++;
1592 op1 = (c & 3) + ((opex & 1) << 2);
1593 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1594 opx = &ins->oprs[op1];
1595 opex = 0; /* For the next iteration */
H. Peter Anvindcffe4b2008-10-10 22:10:31 -07001596
H. Peter Anvina77692b2016-09-20 14:04:33 -07001597
H. Peter Anvin839eca22007-10-29 23:12:47 -07001598 switch (c) {
H. Peter Anvine2c80182005-01-15 22:15:51 +00001599 case 01:
1600 case 02:
1601 case 03:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001602 case 04:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001603 emit_rex(data, ins);
1604 out_rawdata(data, codes, c);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001605 codes += c;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001606 break;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00001607
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001608 case 05:
1609 case 06:
1610 case 07:
1611 opex = c;
1612 break;
H. Peter Anvindcffe4b2008-10-10 22:10:31 -07001613
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001614 case4(010):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001615 emit_rex(data, ins);
1616 out_rawbyte(data, *codes++ + (regval(opx) & 7));
H. Peter Anvine2c80182005-01-15 22:15:51 +00001617 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001618
Jin Kyu Song164d6072013-10-15 19:10:13 -07001619 case4(014):
1620 break;
1621
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001622 case4(020):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001623 out_imm(data, opx, 1, OUT_WRAP);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001624 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001625
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001626 case4(024):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001627 out_imm(data, opx, 1, OUT_UNSIGNED);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001628 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001629
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001630 case4(030):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001631 out_imm(data, opx, 2, OUT_WRAP);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001632 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001633
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001634 case4(034):
H. Peter Anvin839eca22007-10-29 23:12:47 -07001635 if (opx->type & (BITS16 | BITS32))
1636 size = (opx->type & BITS16) ? 2 : 4;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001637 else
1638 size = (bits == 16) ? 2 : 4;
H. Peter Anvina77692b2016-09-20 14:04:33 -07001639 out_imm(data, opx, size, OUT_WRAP);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001640 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001641
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001642 case4(040):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001643 out_imm(data, opx, 4, OUT_WRAP);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001644 break;
H. Peter Anvin3ba46772002-05-27 23:19:35 +00001645
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001646 case4(044):
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07001647 size = ins->addr_size >> 3;
H. Peter Anvina77692b2016-09-20 14:04:33 -07001648 out_imm(data, opx, size, OUT_WRAP);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001649 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001650
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001651 case4(050):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001652 if (opx->segment == data->segment) {
1653 int64_t delta = opx->offset - data->offset
1654 - (data->inslen - data->insoffs);
1655 if (delta > 127 || delta < -128)
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001656 nasm_nonfatal("short jump is out of range");
H. Peter Anvinfea84d72010-05-06 15:32:20 -07001657 }
H. Peter Anvina77692b2016-09-20 14:04:33 -07001658 out_reladdr(data, opx, 1);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001659 break;
H. Peter Anvin70653092007-10-19 14:42:29 -07001660
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001661 case4(054):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001662 out_imm(data, opx, 8, OUT_WRAP);
Keith Kaniosb7a89542007-04-12 02:40:54 +00001663 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001664
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001665 case4(060):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001666 out_reladdr(data, opx, 2);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001667 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001668
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001669 case4(064):
H. Peter Anvin839eca22007-10-29 23:12:47 -07001670 if (opx->type & (BITS16 | BITS32 | BITS64))
1671 size = (opx->type & BITS16) ? 2 : 4;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001672 else
1673 size = (bits == 16) ? 2 : 4;
H. Peter Anvina77692b2016-09-20 14:04:33 -07001674
1675 out_reladdr(data, opx, size);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001676 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001677
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001678 case4(070):
H. Peter Anvina77692b2016-09-20 14:04:33 -07001679 out_reladdr(data, opx, 4);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001680 break;
H. Peter Anvinaf535c12002-04-30 20:59:21 +00001681
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001682 case4(074):
H. Peter Anvin839eca22007-10-29 23:12:47 -07001683 if (opx->segment == NO_SEG)
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001684 nasm_nonfatal("value referenced by FAR is not relocatable");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001685 out_segment(data, opx);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001686 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001687
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001688 case 0172:
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001689 {
1690 int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
1691 const struct operand *opy;
1692
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001693 c = *codes++;
1694 opx = &ins->oprs[c >> 3];
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001695 opy = &ins->oprs[c & 7];
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001696 if (!absolute_op(opy))
1697 nasm_nonfatal("non-absolute expression not permitted "
1698 "as argument %d", c & 7);
1699 else if (opy->offset & ~mask)
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001700 nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001701 "is4 argument exceeds bounds");
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001702 c = opy->offset & mask;
1703 goto emit_is4;
1704 }
H. Peter Anvind85d2502008-05-04 17:53:31 -07001705
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001706 case 0173:
1707 c = *codes++;
1708 opx = &ins->oprs[c >> 4];
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001709 c &= 15;
1710 goto emit_is4;
H. Peter Anvind58656f2008-05-06 20:11:14 -07001711
H. Peter Anvincffe61e2011-07-07 17:21:24 -07001712 case4(0174):
H. Peter Anvin637b9cc2016-09-20 16:39:46 -07001713 c = 0;
1714 emit_is4:
1715 r = nasm_regvals[opx->basereg];
1716 out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001717 break;
H. Peter Anvin52dc3532008-05-20 19:29:04 -07001718
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001719 case4(0254):
H. Peter Anvin164d2462017-02-20 02:39:56 -08001720 if (absolute_op(opx) &&
H. Peter Anvina77692b2016-09-20 14:04:33 -07001721 (int32_t)opx->offset != (int64_t)opx->offset) {
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001722 nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001723 "signed dword immediate exceeds bounds");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001724 }
H. Peter Anvina77692b2016-09-20 14:04:33 -07001725 out_imm(data, opx, 4, OUT_SIGNED);
H. Peter Anvin588df782008-10-07 10:05:10 -07001726 break;
1727
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001728 case4(0240):
1729 case 0250:
1730 codes += 3;
1731 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1732 EVEX_P2Z | EVEX_P2AAA, 2);
1733 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1734 bytes[0] = 0x62;
1735 /* EVEX.X can be set by either REX or EVEX for different reasons */
Jin Kyu Song1be09ee2013-11-08 01:14:39 -08001736 bytes[1] = ((((ins->rex & 7) << 5) |
1737 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
H. Peter Anvin2c9b6ad2016-05-13 14:42:55 -07001738 (ins->vex_cm & EVEX_P0MM);
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001739 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1740 ((~ins->vexreg & 15) << 3) |
1741 (1 << 2) | (ins->vex_wlp & 3);
1742 bytes[3] = ins->evex_p[2];
H. Peter Anvina77692b2016-09-20 14:04:33 -07001743 out_rawdata(data, bytes, 4);
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001744 break;
1745
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001746 case4(0260):
1747 case 0270:
1748 codes += 2;
H. Peter Anvin621a69a2013-11-28 12:11:24 -08001749 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1750 ins->prefixes[PPS_VEX] == P_VEX3) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001751 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1752 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1753 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
H. Peter Anvinfc561202011-07-07 16:58:22 -07001754 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
H. Peter Anvina77692b2016-09-20 14:04:33 -07001755 out_rawdata(data, bytes, 3);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001756 } else {
1757 bytes[0] = 0xc5;
1758 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
H. Peter Anvinfc561202011-07-07 16:58:22 -07001759 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
H. Peter Anvina77692b2016-09-20 14:04:33 -07001760 out_rawdata(data, bytes, 2);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001761 }
1762 break;
H. Peter Anvind85d2502008-05-04 17:53:31 -07001763
H. Peter Anvine014f352012-02-25 22:35:19 -08001764 case 0271:
1765 case 0272:
1766 case 0273:
H. Peter Anvin8ea22002012-02-25 10:24:24 -08001767 break;
1768
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001769 case4(0274):
1770 {
H. Peter Anvin02788e12017-03-01 13:39:10 -08001771 uint64_t uv, um;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001772 int s;
H. Peter Anvinc1377e92008-10-06 23:40:31 -07001773
H. Peter Anvin64e87d02017-03-01 13:45:02 -08001774 if (absolute_op(opx)) {
1775 if (ins->rex & REX_W)
1776 s = 64;
1777 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1778 s = 16;
1779 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1780 s = 32;
1781 else
1782 s = bits;
H. Peter Anvinc1377e92008-10-06 23:40:31 -07001783
H. Peter Anvin64e87d02017-03-01 13:45:02 -08001784 um = (uint64_t)2 << (s-1);
1785 uv = opx->offset;
H. Peter Anvin02788e12017-03-01 13:39:10 -08001786
H. Peter Anvin64e87d02017-03-01 13:45:02 -08001787 if (uv > 127 && uv < (uint64_t)-128 &&
1788 (uv < um-128 || uv > um-1)) {
1789 /* If this wasn't explicitly byte-sized, warn as though we
1790 * had fallen through to the imm16/32/64 case.
1791 */
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08001792 nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
H. Peter Anvin64e87d02017-03-01 13:45:02 -08001793 "%s value exceeds bounds",
1794 (opx->type & BITS8) ? "signed byte" :
1795 s == 16 ? "word" :
1796 s == 32 ? "dword" :
1797 "signed dword");
1798 }
1799
1800 /* Output as a raw byte to avoid byte overflow check */
1801 out_rawbyte(data, (uint8_t)uv);
1802 } else {
1803 out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001804 }
H. Peter Anvinc1377e92008-10-06 23:40:31 -07001805 break;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001806 }
H. Peter Anvinc1377e92008-10-06 23:40:31 -07001807
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001808 case4(0300):
H. Peter Anvine2c80182005-01-15 22:15:51 +00001809 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001810
H. Peter Anvine2c80182005-01-15 22:15:51 +00001811 case 0310:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001812 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
1813 out_rawbyte(data, 0x67);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001814 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001815
H. Peter Anvine2c80182005-01-15 22:15:51 +00001816 case 0311:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001817 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
1818 out_rawbyte(data, 0x67);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001819 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001820
H. Peter Anvine2c80182005-01-15 22:15:51 +00001821 case 0312:
1822 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001823
Keith Kaniosb7a89542007-04-12 02:40:54 +00001824 case 0313:
1825 ins->rex = 0;
1826 break;
H. Peter Anvinc5b9ce02007-09-22 21:49:51 -07001827
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001828 case4(0314):
1829 break;
H. Peter Anvin23440102007-11-12 21:02:33 -08001830
H. Peter Anvine2c80182005-01-15 22:15:51 +00001831 case 0320:
H. Peter Anvine2c80182005-01-15 22:15:51 +00001832 case 0321:
H. Peter Anvine2c80182005-01-15 22:15:51 +00001833 break;
H. Peter Anvinef7468f2002-04-30 20:57:59 +00001834
H. Peter Anvine2c80182005-01-15 22:15:51 +00001835 case 0322:
H. Peter Anvin70653092007-10-19 14:42:29 -07001836 case 0323:
1837 break;
1838
Keith Kaniosb7a89542007-04-12 02:40:54 +00001839 case 0324:
H. Peter Anvin3df97a72007-05-30 03:25:21 +00001840 ins->rex |= REX_W;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001841 break;
H. Peter Anvin70653092007-10-19 14:42:29 -07001842
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001843 case 0325:
1844 break;
H. Peter Anvin9472dab2009-06-24 21:38:29 -07001845
Ben Rudiak-Gouldd7ab1f92013-02-20 23:25:54 +04001846 case 0326:
1847 break;
1848
H. Peter Anvine2c80182005-01-15 22:15:51 +00001849 case 0330:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001850 out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
H. Peter Anvine2c80182005-01-15 22:15:51 +00001851 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001852
H. Peter Anvine2c80182005-01-15 22:15:51 +00001853 case 0331:
H. Peter Anvine2c80182005-01-15 22:15:51 +00001854 break;
H. Peter Anvinaf535c12002-04-30 20:59:21 +00001855
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001856 case 0332:
H. Peter Anvine2c80182005-01-15 22:15:51 +00001857 case 0333:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001858 out_rawbyte(data, c - 0332 + 0xF2);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001859 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001860
Keith Kanios48af1772007-08-17 07:37:52 +00001861 case 0334:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001862 if (ins->rex & REX_R)
1863 out_rawbyte(data, 0xF0);
Keith Kanios48af1772007-08-17 07:37:52 +00001864 ins->rex &= ~(REX_L|REX_R);
1865 break;
H. Peter Anvin0db11e22007-04-17 20:23:11 +00001866
H. Peter Anvincb9b6902007-09-12 21:58:51 -07001867 case 0335:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001868 break;
H. Peter Anvincb9b6902007-09-12 21:58:51 -07001869
H. Peter Anvin962e3052008-08-28 17:47:16 -07001870 case 0336:
1871 case 0337:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001872 break;
H. Peter Anvin962e3052008-08-28 17:47:16 -07001873
H. Peter Anvine2c80182005-01-15 22:15:51 +00001874 case 0340:
H. Peter Anvine2c80182005-01-15 22:15:51 +00001875 if (ins->oprs[0].segment != NO_SEG)
H. Peter Anvinc5136902018-06-15 18:20:17 -07001876 nasm_panic("non-constant BSS size in pass two");
H. Peter Anvina77692b2016-09-20 14:04:33 -07001877
1878 out_reserve(data, ins->oprs[0].offset);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001879 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001880
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001881 case 0341:
1882 break;
H. Peter Anvinc2acf7b2009-02-21 18:22:56 -08001883
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001884 case 0360:
1885 break;
H. Peter Anvinfff5a472008-05-20 09:46:24 -07001886
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001887 case 0361:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001888 out_rawbyte(data, 0x66);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001889 break;
H. Peter Anvinfff5a472008-05-20 09:46:24 -07001890
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001891 case 0364:
1892 case 0365:
1893 break;
H. Peter Anvin62cb6062007-09-11 22:44:03 +00001894
Keith Kanios48af1772007-08-17 07:37:52 +00001895 case 0366:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001896 case 0367:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001897 out_rawbyte(data, c - 0366 + 0x66);
Keith Kanios48af1772007-08-17 07:37:52 +00001898 break;
H. Peter Anvin62cb6062007-09-11 22:44:03 +00001899
Jin Kyu Song03041092013-10-15 19:38:51 -07001900 case3(0370):
H. Peter Anvine2c80182005-01-15 22:15:51 +00001901 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001902
H. Peter Anvine2c80182005-01-15 22:15:51 +00001903 case 0373:
H. Peter Anvina77692b2016-09-20 14:04:33 -07001904 out_rawbyte(data, bits == 16 ? 3 : 5);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001905 break;
H. Peter Anvineba20a72002-04-30 20:53:55 +00001906
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07001907 case 0374:
1908 eat = EA_XMMVSIB;
1909 break;
1910
1911 case 0375:
1912 eat = EA_YMMVSIB;
1913 break;
1914
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001915 case 0376:
1916 eat = EA_ZMMVSIB;
1917 break;
1918
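        /*
         * Effective-address (ModRM) operands: emit the ModRM byte, the
         * SIB byte if present, and then any displacement.
         */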
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001919 case4(0100):
1920 case4(0110):
1921 case4(0120):
1922 case4(0130):
1923 case4(0200):
1924 case4(0204):
1925 case4(0210):
1926 case4(0214):
1927 case4(0220):
1928 case4(0224):
1929 case4(0230):
1930 case4(0234):
1931 {
H. Peter Anvine2c80182005-01-15 22:15:51 +00001932 ea ea_data;
1933 int rfield;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001934 opflags_t rflags;
Keith Kaniosb7a89542007-04-12 02:40:54 +00001935 uint8_t *p;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001936 struct operand *opy = &ins->oprs[op2];
H. Peter Anvin70653092007-10-19 14:42:29 -07001937
H. Peter Anvin3df97a72007-05-30 03:25:21 +00001938 if (c <= 0177) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001939 /* pick rfield from operand b (opx) */
1940 rflags = regflag(opx);
H. Peter Anvin33d5fc02008-10-23 23:07:53 -07001941 rfield = nasm_regvals[opx->basereg];
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001942 } else {
1943 /* rfield is constant */
1944 rflags = 0;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001945 rfield = c & 7;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001946 }
H. Peter Anvine2c80182005-01-15 22:15:51 +00001947
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001948 if (process_ea(opy, &ea_data, bits,
H. Peter Anvin8f622462017-04-02 19:02:29 -07001949 rfield, rflags, ins, &errmsg) != eat)
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03001950 nasm_nonfatal("%s", errmsg);
Charles Crayne7e975552007-11-03 22:06:13 -07001951
H. Peter Anvine2c80182005-01-15 22:15:51 +00001952 p = bytes;
1953 *p++ = ea_data.modrm;
1954 if (ea_data.sib_present)
1955 *p++ = ea_data.sib;
H. Peter Anvina77692b2016-09-20 14:04:33 -07001956 out_rawdata(data, bytes, p - bytes);
H. Peter Anvine2c80182005-01-15 22:15:51 +00001957
Victor van den Elzencf9332c2008-10-01 12:18:28 +02001958 /*
1959 * Make sure the address gets the right offset in case
1960 * the line breaks in the .lst file (BR 1197827)
1961 */
Victor van den Elzencf9332c2008-10-01 12:18:28 +02001962
H. Peter Anvin72bf3fe2013-11-26 20:19:53 -08001963 if (ea_data.bytes) {
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07001964 /* use compressed displacement, if available */
H. Peter Anvina77692b2016-09-20 14:04:33 -07001965 if (ea_data.disp8) {
1966 out_rawbyte(data, ea_data.disp8);
1967 } else if (ea_data.rip) {
1968 out_reladdr(data, opy, ea_data.bytes);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001969 } else {
H. Peter Anvin72bf3fe2013-11-26 20:19:53 -08001970 int asize = ins->addr_size >> 3;
H. Peter Anvin72bf3fe2013-11-26 20:19:53 -08001971
H. Peter Anvina77692b2016-09-20 14:04:33 -07001972 if (overflow_general(opy->offset, asize) ||
1973 signed_bits(opy->offset, ins->addr_size) !=
1974 signed_bits(opy->offset, ea_data.bytes << 3))
H. Peter Anvin285222f2017-03-01 13:27:33 -08001975 warn_overflow(ea_data.bytes);
Victor van den Elzen0d268fb2010-01-24 21:24:57 +01001976
H. Peter Anvina77692b2016-09-20 14:04:33 -07001977 out_imm(data, opy, ea_data.bytes,
H. Peter Anvind9bc2442017-03-28 15:52:58 -07001978 (asize > ea_data.bytes)
1979 ? OUT_SIGNED : OUT_WRAP);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001980 }
H. Peter Anvine2c80182005-01-15 22:15:51 +00001981 }
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001982 }
1983 break;
H. Peter Anvin507ae032008-10-09 15:37:10 -07001984
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001985 default:
H. Peter Anvinc5136902018-06-15 18:20:17 -07001986 nasm_panic("internal instruction table corrupt"
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04001987 ": instruction code \\%o (0x%02X) given", c, c);
1988 break;
H. Peter Anvine2c80182005-01-15 22:15:51 +00001989 }
H. Peter Anvin839eca22007-10-29 23:12:47 -07001990 }
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00001991}
1992
H. Peter Anvinf8563f72009-10-13 12:28:14 -07001993static opflags_t regflag(const operand * o)
H. Peter Anvin3df97a72007-05-30 03:25:21 +00001994{
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04001995 if (!is_register(o->basereg))
H. Peter Anvinc5136902018-06-15 18:20:17 -07001996 nasm_panic("invalid operand passed to regflag()");
H. Peter Anvina4835d42008-05-20 14:21:29 -07001997 return nasm_reg_flags[o->basereg];
H. Peter Anvin3df97a72007-05-30 03:25:21 +00001998}
1999
H. Peter Anvin5b0e3ec2007-07-07 02:01:08 +00002000static int32_t regval(const operand * o)
H. Peter Anvineba20a72002-04-30 20:53:55 +00002001{
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04002002 if (!is_register(o->basereg))
H. Peter Anvinc5136902018-06-15 18:20:17 -07002003 nasm_panic("invalid operand passed to regval()");
H. Peter Anvina4835d42008-05-20 14:21:29 -07002004 return nasm_regvals[o->basereg];
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002005}
2006
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002007static int op_rexflags(const operand * o, int mask)
2008{
H. Peter Anvinf8563f72009-10-13 12:28:14 -07002009 opflags_t flags;
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002010 int val;
2011
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04002012 if (!is_register(o->basereg))
H. Peter Anvinc5136902018-06-15 18:20:17 -07002013 nasm_panic("invalid operand passed to op_rexflags()");
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002014
H. Peter Anvina4835d42008-05-20 14:21:29 -07002015 flags = nasm_reg_flags[o->basereg];
2016 val = nasm_regvals[o->basereg];
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002017
2018 return rexflags(val, flags, mask);
2019}
2020
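/* REX bits implied by a register value and its flags, filtered by mask. */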
H. Peter Anvinf8563f72009-10-13 12:28:14 -07002021static int rexflags(int val, opflags_t flags, int mask)
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002022{
2023 int rex = 0;
2024
H. Peter Anvinc6c750c2013-11-08 15:28:19 -08002025 if (val >= 0 && (val & 8))
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002026 rex |= REX_B|REX_X|REX_R;
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002027 if (flags & BITS64)
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002028 rex |= REX_W;
2029 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
2030 rex |= REX_H;
2031 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
2032 rex |= REX_P;
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002033
2034 return rex & mask;
2035}
2036
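/*
 * EVEX bits implied by a register value and its decorator flags for
 * the given EVEX prefix byte (0 or 2), filtered by mask.
 */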
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002037static int evexflags(int val, decoflags_t deco,
2038 int mask, uint8_t byte)
2039{
2040 int evex = 0;
2041
Jin Kyu Song1be09ee2013-11-08 01:14:39 -08002042 switch (byte) {
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002043 case 0:
H. Peter Anvinc6c750c2013-11-08 15:28:19 -08002044 if (val >= 0 && (val & 16))
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002045 evex |= (EVEX_P0RP | EVEX_P0X);
2046 break;
2047 case 2:
H. Peter Anvinc6c750c2013-11-08 15:28:19 -08002048 if (val >= 0 && (val & 16))
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002049 evex |= EVEX_P2VP;
2050 if (deco & Z)
2051 evex |= EVEX_P2Z;
2052 if (deco & OPMASK_MASK)
2053 evex |= deco & EVEX_P2AAA;
2054 break;
2055 }
2056 return evex & mask;
2057}
2058
2059static int op_evexflags(const operand * o, int mask, uint8_t byte)
2060{
2061 int val;
2062
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002063 val = nasm_regvals[o->basereg];
2064
2065 return evexflags(val, o->decoflags, mask, byte);
2066}
2067
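/*
 * Scan the instruction templates for the best match. If the only
 * failure is a missing operand size, fill in the sizes suggested by
 * the candidate templates (fuzzy operand-size matching) and retry.
 */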
H. Peter Anvin23595f52009-07-25 17:44:25 -07002068static enum match_result find_match(const struct itemplate **tempp,
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002069 insn *instruction,
2070 int32_t segment, int64_t offset, int bits)
H. Peter Anvin23595f52009-07-25 17:44:25 -07002071{
2072 const struct itemplate *temp;
2073 enum match_result m, merr;
H. Peter Anvina7643f42009-10-13 12:32:20 -07002074 opflags_t xsizeflags[MAX_OPERANDS];
H. Peter Anvina81655b2009-07-25 18:15:28 -07002075 bool opsizemissing = false;
Jin Kyu Songe3a06b92013-08-28 19:15:23 -07002076 int8_t broadcast = instruction->evex_brerop;
H. Peter Anvina81655b2009-07-25 18:15:28 -07002077 int i;
2078
Jin Kyu Song4d1fc3f2013-08-21 19:29:10 -07002079 /* broadcasting uses a different data element size */
2080 for (i = 0; i < instruction->operands; i++)
2081 if (i == broadcast)
2082 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
2083 else
2084 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
H. Peter Anvin23595f52009-07-25 17:44:25 -07002085
2086 merr = MERR_INVALOP;
2087
2088 for (temp = nasm_instructions[instruction->opcode];
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002089 temp->opcode != I_none; temp++) {
2090 m = matches(temp, instruction, bits);
2091 if (m == MOK_JUMP) {
H. Peter Anvin8cc8a1d2012-02-25 11:11:42 -08002092 if (jmp_match(segment, offset, bits, instruction, temp))
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002093 m = MOK_GOOD;
2094 else
2095 m = MERR_INVALOP;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002096 } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002097 /*
2098 * Missing operand size and a candidate for fuzzy matching...
2099 */
Ben Rudiak-Gould6e878932013-02-27 10:13:14 -08002100 for (i = 0; i < temp->operands; i++)
Jin Kyu Song4d1fc3f2013-08-21 19:29:10 -07002101 if (i == broadcast)
2102 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
2103 else
2104 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002105 opsizemissing = true;
2106 }
2107 if (m > merr)
2108 merr = m;
2109 if (merr == MOK_GOOD)
2110 goto done;
H. Peter Anvina81655b2009-07-25 18:15:28 -07002111 }
2112
2113 /* No match, but see if we can get a fuzzy operand size match... */
2114 if (!opsizemissing)
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002115 goto done;
H. Peter Anvina81655b2009-07-25 18:15:28 -07002116
2117 for (i = 0; i < instruction->operands; i++) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002118 /*
2119 * We ignore extrinsic operand sizes on registers, so we should
2120 * never try to fuzzy-match on them. This also resolves the case
2121 * when we have e.g. "xmmrm128" in two different positions.
2122 */
2123 if (is_class(REGISTER, instruction->oprs[i].type))
2124 continue;
H. Peter Anvinff5d6562009-10-05 14:08:05 -07002125
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002126 /* This tests if xsizeflags[i] has more than one bit set */
2127 if ((xsizeflags[i] & (xsizeflags[i]-1)))
2128 goto done; /* No luck */
H. Peter Anvina81655b2009-07-25 18:15:28 -07002129
Jin Kyu Song7903c072013-10-30 03:00:12 -07002130 if (i == broadcast) {
Jin Kyu Song4d1fc3f2013-08-21 19:29:10 -07002131 instruction->oprs[i].decoflags |= xsizeflags[i];
Jin Kyu Song7903c072013-10-30 03:00:12 -07002132 instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
2133 BITS32 : BITS64);
2134 } else {
Jin Kyu Song4d1fc3f2013-08-21 19:29:10 -07002135 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
Jin Kyu Song7903c072013-10-30 03:00:12 -07002136 }
H. Peter Anvina81655b2009-07-25 18:15:28 -07002137 }
2138
2139 /* Try matching again... */
2140 for (temp = nasm_instructions[instruction->opcode];
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002141 temp->opcode != I_none; temp++) {
2142 m = matches(temp, instruction, bits);
2143 if (m == MOK_JUMP) {
H. Peter Anvin8cc8a1d2012-02-25 11:11:42 -08002144 if (jmp_match(segment, offset, bits, instruction, temp))
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002145 m = MOK_GOOD;
2146 else
2147 m = MERR_INVALOP;
2148 }
2149 if (m > merr)
2150 merr = m;
2151 if (merr == MOK_GOOD)
2152 goto done;
H. Peter Anvin23595f52009-07-25 17:44:25 -07002153 }
2154
H. Peter Anvina81655b2009-07-25 18:15:28 -07002155done:
H. Peter Anvin23595f52009-07-25 17:44:25 -07002156 *tempp = temp;
2157 return merr;
2158}
2159
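/*
 * Number of broadcast repetitions: how many elements of size brsize
 * fit into an operand of the size given by opflags. For example, a
 * 64-bit element broadcast into a 256-bit operand gives 4, i.e. the
 * {1to4} decorator.
 */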
Mark Charneydcaef4b2014-10-09 13:45:17 -04002160static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2161{
H. Peter Anvin2902fbc2017-02-20 00:35:58 -08002162 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
Mark Charneydcaef4b2014-10-09 13:45:17 -04002163 uint8_t brcast_num;
2164
Mark Charneydcaef4b2014-10-09 13:45:17 -04002165 if (brsize > BITS64)
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03002166 nasm_fatal("size of broadcasting element is greater than 64 bits");
Mark Charneydcaef4b2014-10-09 13:45:17 -04002167
H. Peter Anvin2902fbc2017-02-20 00:35:58 -08002168 /*
2169 * The shift term is to take care of the extra BITS80 inserted
2170 * between BITS64 and BITS128.
2171 */
2172 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
2173 >> (opsize > (BITS64 >> SIZE_SHIFT));
Mark Charneydcaef4b2014-10-09 13:45:17 -04002174
2175 return brcast_num;
2176}
2177
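/*
 * Check how well a single instruction template matches the given
 * instruction in the current mode; returns MOK_* on a usable match or
 * an MERR_* code describing the mismatch.
 */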
H. Peter Anvin65289e82009-07-25 17:25:11 -07002178static enum match_result matches(const struct itemplate *itemp,
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002179 insn *instruction, int bits)
H. Peter Anvineba20a72002-04-30 20:53:55 +00002180{
Cyrill Gorcunov167917a2012-09-10 00:19:12 +04002181 opflags_t size[MAX_OPERANDS], asize;
H. Peter Anvin3fb86f22009-07-25 19:12:10 -07002182 bool opsizemissing = false;
Cyrill Gorcunov167917a2012-09-10 00:19:12 +04002183 int i, oprs;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002184
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002185 /*
2186 * Check the opcode
2187 */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002188 if (itemp->opcode != instruction->opcode)
H. Peter Anvin65289e82009-07-25 17:25:11 -07002189 return MERR_INVALOP;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002190
2191 /*
2192 * Count the operands
2193 */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002194 if (itemp->operands != instruction->operands)
H. Peter Anvin65289e82009-07-25 17:25:11 -07002195 return MERR_INVALOP;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002196
2197 /*
H. Peter Anvin47fb7bc2010-08-24 13:53:22 -07002198 * Is it legal?
2199 */
Chang S. Baea5786342018-08-15 23:22:21 +03002200 if (!(optimizing.level > 0) && itemp_has(itemp, IF_OPT))
H. Peter Anvin47fb7bc2010-08-24 13:53:22 -07002201 return MERR_INVALOP;
2202
2203 /*
Jin Kyu Song6cfa9682013-11-26 17:27:48 -08002204 * {evex} available?
2205 */
H. Peter Anvin621a69a2013-11-28 12:11:24 -08002206 switch (instruction->prefixes[PPS_VEX]) {
2207 case P_EVEX:
2208 if (!itemp_has(itemp, IF_EVEX))
2209 return MERR_ENCMISMATCH;
2210 break;
2211 case P_VEX3:
2212 case P_VEX2:
2213 if (!itemp_has(itemp, IF_VEX))
2214 return MERR_ENCMISMATCH;
2215 break;
2216 default:
2217 break;
Jin Kyu Song6cfa9682013-11-26 17:27:48 -08002218 }
2219
2220 /*
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002221 * Check that no spurious colons or TOs are present
2222 */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002223 for (i = 0; i < itemp->operands; i++)
2224 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
H. Peter Anvin65289e82009-07-25 17:25:11 -07002225 return MERR_INVALOP;
H. Peter Anvin70653092007-10-19 14:42:29 -07002226
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002227 /*
H. Peter Anvin32cd4c22008-04-04 13:34:53 -07002228 * Process size flags
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002229 */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002230 switch (itemp_smask(itemp)) {
2231 case IF_GENBIT(IF_SB):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002232 asize = BITS8;
2233 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002234 case IF_GENBIT(IF_SW):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002235 asize = BITS16;
2236 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002237 case IF_GENBIT(IF_SD):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002238 asize = BITS32;
2239 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002240 case IF_GENBIT(IF_SQ):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002241 asize = BITS64;
2242 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002243 case IF_GENBIT(IF_SO):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002244 asize = BITS128;
2245 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002246 case IF_GENBIT(IF_SY):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002247 asize = BITS256;
2248 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002249 case IF_GENBIT(IF_SZ):
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002250 asize = BITS512;
2251 break;
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002252 case IF_GENBIT(IF_SIZE):
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002253 switch (bits) {
2254 case 16:
2255 asize = BITS16;
2256 break;
2257 case 32:
2258 asize = BITS32;
2259 break;
2260 case 64:
2261 asize = BITS64;
2262 break;
2263 default:
2264 asize = 0;
2265 break;
2266 }
2267 break;
H. Peter Anvin60926242009-07-26 16:25:38 -07002268 default:
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002269 asize = 0;
2270 break;
H. Peter Anvin60926242009-07-26 16:25:38 -07002271 }
2272
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002273 if (itemp_armask(itemp)) {
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002274 /* S- flags only apply to a specific operand */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002275 i = itemp_arg(itemp);
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002276 memset(size, 0, sizeof size);
2277 size[i] = asize;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002278 } else {
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002279 /* S- flags apply to all operands */
2280 for (i = 0; i < MAX_OPERANDS; i++)
2281 size[i] = asize;
H. Peter Anvinef7468f2002-04-30 20:57:59 +00002282 }
H. Peter Anvin70653092007-10-19 14:42:29 -07002283
H. Peter Anvin32cd4c22008-04-04 13:34:53 -07002284 /*
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002285 * Check that the operand flags all match up;
2286 * it's a bit tricky, so let's be verbose:
2287 *
2288 * 1) Find the size of the operand. If the instruction
2289 * doesn't specify one, we try to guess it either
2290 * from the template (IF_S* flag) or
2291 * from the code bits.
2292 *
Ben Rudiak-Gould6e878932013-02-27 10:13:14 -08002293 * 2) If the template operand does not match the instruction, OR
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002294 * the template specifies an operand size AND that size differs
2295 * from the one the instruction has (perhaps taken from code bits),
2296 * then:
2297 * a) check that only the sizes of the instruction and operand
2298 * differ, while all other characteristics match;
2299 * b) perhaps it's a register specified in the instruction; in
2300 * that case we just mark the operand as "size
2301 * missing", which turns on the fuzzy operand-size
2302 * logic facility (handled by the caller).
H. Peter Anvin32cd4c22008-04-04 13:34:53 -07002303 */
2304 for (i = 0; i < itemp->operands; i++) {
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002305 opflags_t type = instruction->oprs[i].type;
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002306 decoflags_t deco = instruction->oprs[i].decoflags;
H. Peter Anvin8e37ff42017-04-02 18:38:58 -07002307 decoflags_t ideco = itemp->deco[i];
Jin Kyu Song7903c072013-10-30 03:00:12 -07002308 bool is_broadcast = deco & BRDCAST_MASK;
Jin Kyu Song25c22122013-10-30 03:12:45 -07002309 uint8_t brcast_num = 0;
Jin Kyu Song7903c072013-10-30 03:00:12 -07002310 opflags_t template_opsize, insn_opsize;
2311
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002312 if (!(type & SIZE_MASK))
2313 type |= size[i];
H. Peter Anvind85d2502008-05-04 17:53:31 -07002314
Jin Kyu Song7903c072013-10-30 03:00:12 -07002315 insn_opsize = type & SIZE_MASK;
2316 if (!is_broadcast) {
2317 template_opsize = itemp->opd[i] & SIZE_MASK;
2318 } else {
H. Peter Anvin8e37ff42017-04-02 18:38:58 -07002319 decoflags_t deco_brsize = ideco & BRSIZE_MASK;
2320
2321 if (~ideco & BRDCAST_MASK)
2322 return MERR_BRNOTHERE;
2323
            /*
             * When broadcasting, the element size depends on the
             * instruction type; the decorator flag must match it.
             */
Jin Kyu Song7903c072013-10-30 03:00:12 -07002328 if (deco_brsize) {
2329 template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
Jin Kyu Song25c22122013-10-30 03:12:45 -07002330 /* calculate the proper number : {1to<brcast_num>} */
Mark Charneydcaef4b2014-10-09 13:45:17 -04002331 brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
Jin Kyu Song7903c072013-10-30 03:00:12 -07002332 } else {
2333 template_opsize = 0;
2334 }
2335 }
2336
H. Peter Anvin8e37ff42017-04-02 18:38:58 -07002337 if (~ideco & deco & OPMASK_MASK)
2338 return MERR_MASKNOTHERE;
2339
H. Peter Anvinff04a9f2017-08-16 21:48:52 -07002340 if (~ideco & deco & (Z_MASK|STATICRND_MASK|SAE_MASK))
2341 return MERR_DECONOTHERE;
2342
H. Peter Anvincd26fcc2018-06-25 17:15:08 -07002343 if (itemp->opd[i] & ~type & ~(SIZE_MASK|REGSET_MASK))
Ben Rudiak-Gould4e8396b2013-03-01 10:28:32 +04002344 return MERR_INVALOP;
H. Peter Anvincd26fcc2018-06-25 17:15:08 -07002345
2346 if (~itemp->opd[i] & type & REGSET_MASK)
2347 return (itemp->opd[i] & REGSET_MASK)
2348 ? MERR_REGSETSIZE : MERR_REGSET;
2349
2350 if (template_opsize) {
Jin Kyu Song7903c072013-10-30 03:00:12 -07002351 if (template_opsize != insn_opsize) {
2352 if (insn_opsize) {
Jin Kyu Song4d1fc3f2013-08-21 19:29:10 -07002353 return MERR_INVALOP;
Jin Kyu Song7903c072013-10-30 03:00:12 -07002354 } else if (!is_class(REGISTER, type)) {
2355 /*
2356 * Note: we don't honor extrinsic operand sizes for registers,
2357 * so "missing operand size" for a register should be
2358 * considered a wildcard match rather than an error.
2359 */
2360 opsizemissing = true;
Jin Kyu Song4d1fc3f2013-08-21 19:29:10 -07002361 }
Jin Kyu Song25c22122013-10-30 03:12:45 -07002362 } else if (is_broadcast &&
2363 (brcast_num !=
Mark Charneydcaef4b2014-10-09 13:45:17 -04002364 (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
                /*
                 * The broadcast operand size matches, but the number of
                 * repeated memory elements does not.  For example, if a
                 * 64-bit double is broadcast to a ymm (256-bit) operand,
                 * the broadcasting decorator must be {1to4}.
                 */
2371 return MERR_BRNUMMISMATCH;
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002372 }
H. Peter Anvin32cd4c22008-04-04 13:34:53 -07002373 }
2374 }
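    /*
     * Worked example of the broadcast check above (illustrative only):
     * broadcasting a 64-bit element into a 256-bit operand, e.g.
     *     vaddpd ymm0, ymm1, [rax]{1to4}
     * gives template_opsize = BITS64 and a broadcast count of
     * 256/64 = 4; the decorator's BRNUM field must then satisfy
     * 2 << BRNUM == 4, i.e. the source must say {1to4}, otherwise
     * MERR_BRNUMMISMATCH is returned.
     */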
2375
H. Peter Anvin3fb86f22009-07-25 19:12:10 -07002376 if (opsizemissing)
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002377 return MERR_OPSIZEMISSING;
H. Peter Anvin3fb86f22009-07-25 19:12:10 -07002378
H. Peter Anvin32cd4c22008-04-04 13:34:53 -07002379 /*
2380 * Check operand sizes
2381 */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002382 if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
2383 oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
H. Peter Anvine2c80182005-01-15 22:15:51 +00002384 for (i = 0; i < oprs; i++) {
Cyrill Gorcunovbc31bee2009-11-01 23:16:01 +03002385 asize = itemp->opd[i] & SIZE_MASK;
2386 if (asize) {
2387 for (i = 0; i < oprs; i++)
2388 size[i] = asize;
H. Peter Anvine2c80182005-01-15 22:15:51 +00002389 break;
2390 }
2391 }
H. Peter Anvinef7468f2002-04-30 20:57:59 +00002392 } else {
H. Peter Anvine2c80182005-01-15 22:15:51 +00002393 oprs = itemp->operands;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002394 }
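    /*
     * e.g. (illustrative) for a template flagged IF_SM whose first
     * explicitly sized operand is BITS16, every operand without a size
     * of its own is now treated as 16 bits by the mismatch check below.
     */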
2395
Keith Kaniosb7a89542007-04-12 02:40:54 +00002396 for (i = 0; i < itemp->operands; i++) {
H. Peter Anvine2c80182005-01-15 22:15:51 +00002397 if (!(itemp->opd[i] & SIZE_MASK) &&
2398 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
H. Peter Anvin65289e82009-07-25 17:25:11 -07002399 return MERR_OPSIZEMISMATCH;
Keith Kaniosb7a89542007-04-12 02:40:54 +00002400 }
2401
    /*
     * Check that the template is okay at the currently set CPU level
     */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002405 if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
H. Peter Anvin65289e82009-07-25 17:25:11 -07002406 return MERR_BADCPU;
H. Peter Anvin70653092007-10-19 14:42:29 -07002407
Keith Kaniosb7a89542007-04-12 02:40:54 +00002408 /*
H. Peter Anvin6cda4142008-12-29 20:52:28 -08002409 * Verify the appropriate long mode flag.
Keith Kaniosb7a89542007-04-12 02:40:54 +00002410 */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002411 if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
H. Peter Anvin65289e82009-07-25 17:25:11 -07002412 return MERR_BADMODE;
H. Peter Anvine2c80182005-01-15 22:15:51 +00002413
H. Peter Anvinaf535c12002-04-30 20:59:21 +00002414 /*
H. Peter Anvinfb3f4e62012-02-25 22:22:07 -08002415 * If we have a HLE prefix, look for the NOHLE flag
2416 */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002417 if (itemp_has(itemp, IF_NOHLE) &&
H. Peter Anvinfb3f4e62012-02-25 22:22:07 -08002418 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2419 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2420 return MERR_BADHLE;
2421
    /*
     * Check if special handling is needed for jumps
     */
H. Peter Anvin755f5212012-02-25 11:41:34 -08002425 if ((itemp->code[0] & ~1) == 0370)
Cyrill Gorcunov1de95002009-11-06 00:08:38 +03002426 return MOK_JUMP;
H. Peter Anvine2c80182005-01-15 22:15:51 +00002427
    /*
     * Check whether the BND prefix is allowed: BND/NOBND are only
     * valid on BND-capable instructions, and on those the other 0xF2
     * prefixes (REPNE/REPNZ) are prohibited.
     */
Cyrill Gorcunov08359152013-11-09 22:16:11 +04002432 if (!itemp_has(itemp, IF_BND) &&
Jin Kyu Songb287ff02013-12-04 20:05:55 -08002433 (has_prefix(instruction, PPS_REP, P_BND) ||
2434 has_prefix(instruction, PPS_REP, P_NOBND)))
Jin Kyu Song03041092013-10-15 19:38:51 -07002435 return MERR_BADBND;
Jin Kyu Songb287ff02013-12-04 20:05:55 -08002436 else if (itemp_has(itemp, IF_BND) &&
2437 (has_prefix(instruction, PPS_REP, P_REPNE) ||
2438 has_prefix(instruction, PPS_REP, P_REPNZ)))
2439 return MERR_BADREPNE;
Jin Kyu Song03041092013-10-15 19:38:51 -07002440
H. Peter Anvin60926242009-07-26 16:25:38 -07002441 return MOK_GOOD;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002442}
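
/*
 * Illustrative sketch (not part of the build): one way a caller might
 * walk a list of templates and react to the codes returned by the
 * matching routine above (referred to here as matches()).  The helper
 * name find_template and the ntemplates parameter are hypothetical.
 */
#if 0
static const struct itemplate *find_template(const struct itemplate *tmp,
                                             int ntemplates,
                                             insn *instruction, int bits)
{
    int i;

    for (i = 0; i < ntemplates; i++) {
        switch (matches(&tmp[i], instruction, bits)) {
        case MOK_GOOD:
            return &tmp[i];             /* exact match */
        case MOK_JUMP:
            return &tmp[i];             /* match; jump sizing handled later */
        case MERR_OPSIZEMISSING:
            /* the caller may retry this one with a guessed operand size */
            break;
        default:
            break;                      /* hard mismatch; try the next one */
        }
    }
    return NULL;
}
#endif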
2443
/*
 * Check if ModR/M.mod should/can be 01.  Provided EAF_WORDOFFS is not
 * set, it can be when:
 * - EAF_BYTEOFFS is set, or
 * - the offset fits in a byte (when EVEX is not used), or
 * - the offset can be compressed to disp8*N (when EVEX is used)
 */
Henrik Gramner16d4db32017-04-20 16:02:19 +02002450#define IS_MOD_01() (!(input->eaflags & EAF_WORDOFFS) && \
2451 (ins->rex & REX_EV ? seg == NO_SEG && !forw_ref && \
2452 is_disp8n(input, ins, &output->disp8) : \
2453 input->eaflags & EAF_BYTEOFFS || (o >= -128 && \
2454 o <= 127 && seg == NO_SEG && !forw_ref)))
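
/*
 * Rough illustration of the non-EVEX arm of IS_MOD_01() as a stand-alone
 * helper (hypothetical, not part of the build): provided the user did
 * not force a full-size offset, mod can drop to 01 when the displacement
 * is a known, segment-less value in the signed 8-bit range, or when a
 * BYTE offset was requested explicitly.  With EVEX, is_disp8n() also
 * allows a compressed disp8*N displacement.
 */
#if 0
static bool fits_mod01_noevex(int32_t o, int32_t seg, bool forw_ref,
                              int eaflags)
{
    if (eaflags & EAF_WORDOFFS)
        return false;           /* user forced a full-size offset */
    if (eaflags & EAF_BYTEOFFS)
        return true;            /* user forced a byte offset */
    return o >= -128 && o <= 127 && seg == NO_SEG && !forw_ref;
}
#endif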
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002455
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002456static enum ea_type process_ea(operand *input, ea *output, int bits,
H. Peter Anvin8f622462017-04-02 19:02:29 -07002457 int rfield, opflags_t rflags, insn *ins,
2458 const char **errmsg)
H. Peter Anvineba20a72002-04-30 20:53:55 +00002459{
H. Peter Anvinab5bd052010-07-25 12:43:30 -07002460 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002461 int addrbits = ins->addr_size;
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002462 int eaflags = input->eaflags;
H. Peter Anvin1c3277b2008-07-19 21:38:56 -07002463
H. Peter Anvin8f622462017-04-02 19:02:29 -07002464 *errmsg = "invalid effective address"; /* Default error message */
2465
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002466 output->type = EA_SCALAR;
2467 output->rip = false;
Jin Kyu Songdb358a22013-09-20 20:36:19 -07002468 output->disp8 = 0;
H. Peter Anvin99c4ecd2007-08-28 23:06:00 +00002469
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002470 /* REX flags for the rfield operand */
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002471 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002472 /* EVEX.R' flag for the REG operand */
2473 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002474
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002475 if (is_class(REGISTER, input->type)) {
2476 /*
2477 * It's a direct register.
2478 */
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04002479 if (!is_register(input->basereg))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002480 goto err;
Keith Kaniosb7a89542007-04-12 02:40:54 +00002481
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002482 if (!is_reg_class(REG_EA, input->basereg))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002483 goto err;
H. Peter Anvin70653092007-10-19 14:42:29 -07002484
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002485 /* broadcasting is not available with a direct register operand. */
2486 if (input->decoflags & BRDCAST_MASK) {
H. Peter Anvin8f622462017-04-02 19:02:29 -07002487 *errmsg = "broadcast not allowed with register operand";
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002488 goto err;
2489 }
2490
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002491 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002492 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002493 output->sib_present = false; /* no SIB necessary */
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002494 output->bytes = 0; /* no offset necessary either */
2495 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
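        /*
         * For example (illustrative): if this operand is ECX in
         * "add ecx, edx", mod = 3 selects the register-direct form,
         * nasm_regvals[] supplies the 3-bit number for ModR/M.rm, and
         * rfield already carries the other register.
         */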
2496 } else {
2497 /*
2498 * It's a memory reference.
2499 */
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002500
2501 /* Embedded rounding or SAE is not available with a mem ref operand. */
2502 if (input->decoflags & (ER | SAE)) {
H. Peter Anvin8f622462017-04-02 19:02:29 -07002503 *errmsg = "embedded rounding is available only with "
2504 "register-register operations";
2505 goto err;
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002506 }
2507
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002508 if (input->basereg == -1 &&
2509 (input->indexreg == -1 || input->scale == 0)) {
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002510 /*
2511 * It's a pure offset.
2512 */
H. Peter Anvin164d2462017-02-20 02:39:56 -08002513 if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
H. Peter Anvin8f622462017-04-02 19:02:29 -07002514 if (input->segment == NO_SEG ||
2515 (input->opflags & OPFLAG_RELATIVE)) {
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08002516 nasm_warn(ERR_PASS2, "absolute address can not be RIP-relative");
H. Peter Anvin164d2462017-02-20 02:39:56 -08002517 input->type &= ~IP_REL;
2518 input->type |= MEMORY;
2519 }
Victor van den Elzen0d268fb2010-01-24 21:24:57 +01002520 }
2521
Jin Kyu Song97f6fae2013-12-18 21:28:17 -08002522 if (bits == 64 &&
2523 !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
H. Peter Anvine83311c2017-04-06 18:50:28 -07002524 *errmsg = "RIP-relative addressing is prohibited for MIB";
H. Peter Anvin8f622462017-04-02 19:02:29 -07002525 goto err;
Jin Kyu Song97f6fae2013-12-18 21:28:17 -08002526 }
2527
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002528 if (eaflags & EAF_BYTEOFFS ||
2529 (eaflags & EAF_WORDOFFS &&
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03002530 input->disp_size != (addrbits != 16 ? 32 : 16)))
H. Peter Anvin (Intel)80c4f232018-12-14 13:33:24 -08002531 nasm_warn(ERR_PASS1, "displacement size ignored on absolute address");
Victor van den Elzen0d268fb2010-01-24 21:24:57 +01002532
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002533 if (bits == 64 && (~input->type & IP_REL)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002534 output->sib_present = true;
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002535 output->sib = GEN_SIB(0, 4, 5);
2536 output->bytes = 4;
2537 output->modrm = GEN_MODRM(0, rfield, 4);
2538 output->rip = false;
Chuck Crayne42fe6ce2007-06-03 02:42:41 +00002539 } else {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002540 output->sib_present = false;
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002541 output->bytes = (addrbits != 16 ? 4 : 2);
H. Peter Anvin8f622462017-04-02 19:02:29 -07002542 output->modrm = GEN_MODRM(0, rfield,
2543 (addrbits != 16 ? 5 : 6));
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002544 output->rip = bits == 64;
Chuck Crayne42fe6ce2007-06-03 02:42:41 +00002545 }
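            /*
             * Example (illustrative): in BITS 64 an absolute reference
             * such as "mov eax, [abs foo]" takes the SIB form above
             * (no base, no index, disp32), while "mov eax, [rel foo]"
             * keeps ModR/M rm = 101 with output->rip set, i.e. a
             * RIP-relative disp32.
             */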
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002546 } else {
2547 /*
2548 * It's an indirection.
2549 */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002550 int i = input->indexreg, b = input->basereg, s = input->scale;
H. Peter Anvinab5bd052010-07-25 12:43:30 -07002551 int32_t seg = input->segment;
H. Peter Anvine2c80182005-01-15 22:15:51 +00002552 int hb = input->hintbase, ht = input->hinttype;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002553 int t, it, bt; /* register numbers */
2554 opflags_t x, ix, bx; /* register flags */
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002555
H. Peter Anvine2c80182005-01-15 22:15:51 +00002556 if (s == 0)
2557 i = -1; /* make this easy, at least */
H. Peter Anvin70653092007-10-19 14:42:29 -07002558
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04002559 if (is_register(i)) {
H. Peter Anvina4835d42008-05-20 14:21:29 -07002560 it = nasm_regvals[i];
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002561 ix = nasm_reg_flags[i];
2562 } else {
Keith Kaniosb7a89542007-04-12 02:40:54 +00002563 it = -1;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002564 ix = 0;
2565 }
H. Peter Anvin70653092007-10-19 14:42:29 -07002566
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002567 if (is_register(b)) {
H. Peter Anvina4835d42008-05-20 14:21:29 -07002568 bt = nasm_regvals[b];
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002569 bx = nasm_reg_flags[b];
2570 } else {
Keith Kaniosb7a89542007-04-12 02:40:54 +00002571 bt = -1;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002572 bx = 0;
2573 }
H. Peter Anvin70653092007-10-19 14:42:29 -07002574
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002575 /* if either one are a vector register... */
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002576 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
Cyrill Gorcunov167917a2012-09-10 00:19:12 +04002577 opflags_t sok = BITS32 | BITS64;
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002578 int32_t o = input->offset;
2579 int mod, scale, index, base;
2580
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002581 /*
2582 * For a vector SIB, one has to be a vector and the other,
2583 * if present, a GPR. The vector must be the index operand.
2584 */
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002585 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002586 if (s == 0)
2587 s = 1;
2588 else if (s != 1)
2589 goto err;
2590
2591 t = bt, bt = it, it = t;
2592 x = bx, bx = ix, ix = x;
2593 }
2594
2595 if (bt != -1) {
2596 if (REG_GPR & ~bx)
2597 goto err;
2598 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2599 sok &= bx;
2600 else
2601 goto err;
2602 }
2603
2604 /*
2605 * While we're here, ensure the user didn't specify
2606 * WORD or QWORD
2607 */
2608 if (input->disp_size == 16 || input->disp_size == 64)
2609 goto err;
2610
2611 if (addrbits == 16 ||
2612 (addrbits == 32 && !(sok & BITS32)) ||
2613 (addrbits == 64 && !(sok & BITS64)))
2614 goto err;
2615
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002616 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2617 : ((ix & YMMREG & ~REG_EA)
2618 ? EA_YMMVSIB : EA_XMMVSIB));
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002619
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002620 output->rex |= rexflags(it, ix, REX_X);
2621 output->rex |= rexflags(bt, bx, REX_B);
2622 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002623
2624 index = it & 7; /* it is known to be != -1 */
2625
2626 switch (s) {
2627 case 1:
2628 scale = 0;
2629 break;
2630 case 2:
2631 scale = 1;
2632 break;
2633 case 4:
2634 scale = 2;
2635 break;
2636 case 8:
2637 scale = 3;
2638 break;
2639 default: /* then what the smeg is it? */
2640 goto err; /* panic */
2641 }
H. Peter Anvina77692b2016-09-20 14:04:33 -07002642
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002643 if (bt == -1) {
2644 base = 5;
2645 mod = 0;
2646 } else {
2647 base = (bt & 7);
2648 if (base != REG_NUM_EBP && o == 0 &&
2649 seg == NO_SEG && !forw_ref &&
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002650 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002651 mod = 0;
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002652 else if (IS_MOD_01())
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002653 mod = 1;
2654 else
2655 mod = 2;
2656 }
2657
2658 output->sib_present = true;
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002659 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2660 output->modrm = GEN_MODRM(mod, rfield, 4);
2661 output->sib = GEN_SIB(scale, index, base);
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002662 } else if ((ix|bx) & (BITS32|BITS64)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002663 /*
2664 * it must be a 32/64-bit memory reference. Firstly we have
2665 * to check that all registers involved are type E/Rxx.
2666 */
Cyrill Gorcunov167917a2012-09-10 00:19:12 +04002667 opflags_t sok = BITS32 | BITS64;
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002668 int32_t o = input->offset;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002669
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002670 if (it != -1) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002671 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2672 sok &= ix;
2673 else
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002674 goto err;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002675 }
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002676
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002677 if (bt != -1) {
2678 if (REG_GPR & ~bx)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002679 goto err; /* Invalid register */
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002680 if (~sok & bx & SIZE_MASK)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002681 goto err; /* Invalid size */
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002682 sok &= bx;
2683 }
H. Peter Anvin70653092007-10-19 14:42:29 -07002684
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002685 /*
2686 * While we're here, ensure the user didn't specify
2687 * WORD or QWORD
2688 */
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002689 if (input->disp_size == 16 || input->disp_size == 64)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002690 goto err;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002691
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002692 if (addrbits == 16 ||
2693 (addrbits == 32 && !(sok & BITS32)) ||
2694 (addrbits == 64 && !(sok & BITS64)))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002695 goto err;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002696
Keith Kaniosb7a89542007-04-12 02:40:54 +00002697 /* now reorganize base/index */
2698 if (s == 1 && bt != it && bt != -1 && it != -1 &&
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002699 ((hb == b && ht == EAH_NOTBASE) ||
2700 (hb == i && ht == EAH_MAKEBASE))) {
2701 /* swap if hints say so */
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002702 t = bt, bt = it, it = t;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002703 x = bx, bx = ix, ix = x;
2704 }
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002705
Jin Kyu Song164d6072013-10-15 19:10:13 -07002706 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002707 /* make single reg base, unless hint */
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002708 bt = it, bx = ix, it = -1, ix = 0;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002709 }
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002710 if (eaflags & EAF_MIB) {
2711 /* only for mib operands */
2712 if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
2713 /*
2714 * make a single reg index [reg*1].
2715 * gas uses this form for an explicit index register.
2716 */
2717 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2718 }
2719 if ((ht == EAH_SUMMED) && bt == -1) {
                    /* split a previously summed index back into [base, index] */
2721 bt = it, bx = ix, s--;
2722 }
2723 } else {
2724 if (((s == 2 && it != REG_NUM_ESP &&
Jin Kyu Song3d06af22013-12-18 21:28:41 -08002725 (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002726 s == 3 || s == 5 || s == 9) && bt == -1) {
2727 /* convert 3*EAX to EAX+2*EAX */
2728 bt = it, bx = ix, s--;
2729 }
2730 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
Jin Kyu Song26ddad62013-12-18 22:01:14 -08002731 (eaflags & EAF_TIMESTWO) &&
2732 (hb == b && ht == EAH_NOTBASE)) {
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002733 /*
Jin Kyu Song26ddad62013-12-18 22:01:14 -08002734 * convert [NOSPLIT EAX*1]
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002735 * to sib format with 0x0 displacement - [EAX*1+0].
2736 */
2737 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2738 }
2739 }
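            /*
             * Examples of the rewrites above (illustrative): with no
             * base register, [eax*3] is re-expressed as [eax+eax*2] so
             * it can be encoded with a SIB byte, while the NOSPLIT
             * hint keeps a lone register as a pure index with a zero
             * displacement ([eax*1+0]) rather than folding it into the
             * base.
             */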
Keith Kanios48af1772007-08-17 07:37:52 +00002740 if (s == 1 && it == REG_NUM_ESP) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002741 /* swap ESP into base if scale is 1 */
Keith Kaniosb7a89542007-04-12 02:40:54 +00002742 t = it, it = bt, bt = t;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002743 x = ix, ix = bx, bx = x;
2744 }
2745 if (it == REG_NUM_ESP ||
2746 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002747 goto err; /* wrong, for various reasons */
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002748
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002749 output->rex |= rexflags(it, ix, REX_X);
2750 output->rex |= rexflags(bt, bx, REX_B);
Keith Kaniosb7a89542007-04-12 02:40:54 +00002751
Keith Kanios48af1772007-08-17 07:37:52 +00002752 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002753 /* no SIB needed */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002754 int mod, rm;
H. Peter Anvin70653092007-10-19 14:42:29 -07002755
Keith Kaniosb7a89542007-04-12 02:40:54 +00002756 if (bt == -1) {
H. Peter Anvine2c80182005-01-15 22:15:51 +00002757 rm = 5;
H. Peter Anvine2c80182005-01-15 22:15:51 +00002758 mod = 0;
Keith Kaniosb7a89542007-04-12 02:40:54 +00002759 } else {
2760 rm = (bt & 7);
H. Peter Anvinab5bd052010-07-25 12:43:30 -07002761 if (rm != REG_NUM_EBP && o == 0 &&
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002762 seg == NO_SEG && !forw_ref &&
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002763 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
Keith Kaniosb7a89542007-04-12 02:40:54 +00002764 mod = 0;
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002765 else if (IS_MOD_01())
Keith Kaniosb7a89542007-04-12 02:40:54 +00002766 mod = 1;
2767 else
2768 mod = 2;
2769 }
H. Peter Anvinea838272002-04-30 20:51:53 +00002770
H. Peter Anvin6867acc2007-10-10 14:58:45 -07002771 output->sib_present = false;
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002772 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2773 output->modrm = GEN_MODRM(mod, rfield, rm);
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002774 } else {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002775 /* we need a SIB */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002776 int mod, scale, index, base;
H. Peter Anvin70653092007-10-19 14:42:29 -07002777
Keith Kaniosb7a89542007-04-12 02:40:54 +00002778 if (it == -1)
2779 index = 4, s = 1;
2780 else
2781 index = (it & 7);
H. Peter Anvin70653092007-10-19 14:42:29 -07002782
H. Peter Anvine2c80182005-01-15 22:15:51 +00002783 switch (s) {
2784 case 1:
2785 scale = 0;
2786 break;
2787 case 2:
2788 scale = 1;
2789 break;
2790 case 4:
2791 scale = 2;
2792 break;
2793 case 8:
2794 scale = 3;
2795 break;
2796 default: /* then what the smeg is it? */
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002797 goto err; /* panic */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002798 }
H. Peter Anvin70653092007-10-19 14:42:29 -07002799
Keith Kaniosb7a89542007-04-12 02:40:54 +00002800 if (bt == -1) {
2801 base = 5;
2802 mod = 0;
2803 } else {
2804 base = (bt & 7);
H. Peter Anvinab5bd052010-07-25 12:43:30 -07002805 if (base != REG_NUM_EBP && o == 0 &&
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002806 seg == NO_SEG && !forw_ref &&
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002807 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
Keith Kaniosb7a89542007-04-12 02:40:54 +00002808 mod = 0;
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002809 else if (IS_MOD_01())
Keith Kaniosb7a89542007-04-12 02:40:54 +00002810 mod = 1;
2811 else
2812 mod = 2;
2813 }
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002814
H. Peter Anvin6867acc2007-10-10 14:58:45 -07002815 output->sib_present = true;
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002816 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2817 output->modrm = GEN_MODRM(mod, rfield, 4);
2818 output->sib = GEN_SIB(scale, index, base);
H. Peter Anvine2c80182005-01-15 22:15:51 +00002819 }
2820 } else { /* it's 16-bit */
2821 int mod, rm;
H. Peter Anvinab5bd052010-07-25 12:43:30 -07002822 int16_t o = input->offset;
H. Peter Anvin70653092007-10-19 14:42:29 -07002823
Keith Kaniosb7a89542007-04-12 02:40:54 +00002824 /* check for 64-bit long mode */
2825 if (addrbits == 64)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002826 goto err;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002827
H. Peter Anvine2c80182005-01-15 22:15:51 +00002828 /* check all registers are BX, BP, SI or DI */
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002829 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2830 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002831 goto err;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002832
Keith Kaniosb7a89542007-04-12 02:40:54 +00002833 /* ensure the user didn't specify DWORD/QWORD */
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002834 if (input->disp_size == 32 || input->disp_size == 64)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002835 goto err;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002836
H. Peter Anvine2c80182005-01-15 22:15:51 +00002837 if (s != 1 && i != -1)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002838 goto err; /* no can do, in 16-bit EA */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002839 if (b == -1 && i != -1) {
2840 int tmp = b;
2841 b = i;
2842 i = tmp;
2843 } /* swap */
2844 if ((b == R_SI || b == R_DI) && i != -1) {
2845 int tmp = b;
2846 b = i;
2847 i = tmp;
2848 }
2849 /* have BX/BP as base, SI/DI index */
2850 if (b == i)
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002851 goto err; /* shouldn't ever happen, in theory */
H. Peter Anvine2c80182005-01-15 22:15:51 +00002852 if (i != -1 && b != -1 &&
2853 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002854 goto err; /* invalid combinations */
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002855 if (b == -1) /* pure offset: handled above */
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002856 goto err; /* so if it gets to here, panic! */
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002857
H. Peter Anvine2c80182005-01-15 22:15:51 +00002858 rm = -1;
2859 if (i != -1)
2860 switch (i * 256 + b) {
2861 case R_SI * 256 + R_BX:
2862 rm = 0;
2863 break;
2864 case R_DI * 256 + R_BX:
2865 rm = 1;
2866 break;
2867 case R_SI * 256 + R_BP:
2868 rm = 2;
2869 break;
2870 case R_DI * 256 + R_BP:
2871 rm = 3;
2872 break;
2873 } else
2874 switch (b) {
2875 case R_SI:
2876 rm = 4;
2877 break;
2878 case R_DI:
2879 rm = 5;
2880 break;
2881 case R_BP:
2882 rm = 6;
2883 break;
2884 case R_BX:
2885 rm = 7;
2886 break;
2887 }
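            /*
             * The two switches above implement the classic 16-bit
             * ModR/M table: rm 0..3 = [BX+SI], [BX+DI], [BP+SI],
             * [BP+DI]; rm 4..7 = [SI], [DI], [BP], [BX].  For example
             * (illustrative), "mov ax, [bp+di+12]" selects rm = 3 and,
             * since 12 fits in a signed byte, mod = 1.
             */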
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002888 if (rm == -1) /* can't happen, in theory */
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002889 goto err; /* so panic if it does */
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002890
H. Peter Anvinab5bd052010-07-25 12:43:30 -07002891 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
Jin Kyu Song4360ba22013-12-10 16:24:45 -08002892 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
H. Peter Anvine2c80182005-01-15 22:15:51 +00002893 mod = 0;
Jin Kyu Songcc1dc9d2013-08-15 19:01:25 -07002894 else if (IS_MOD_01())
H. Peter Anvine2c80182005-01-15 22:15:51 +00002895 mod = 1;
2896 else
2897 mod = 2;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002898
H. Peter Anvin6867acc2007-10-10 14:58:45 -07002899 output->sib_present = false; /* no SIB - it's 16-bit */
Cyrill Gorcunov10734c72011-08-29 00:07:17 +04002900 output->bytes = mod; /* bytes of offset needed */
2901 output->modrm = GEN_MODRM(mod, rfield, rm);
H. Peter Anvine2c80182005-01-15 22:15:51 +00002902 }
2903 }
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002904 }
H. Peter Anvin70653092007-10-19 14:42:29 -07002905
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002906 output->size = 1 + output->sib_present + output->bytes;
H. Peter Anvin3089f7e2011-06-22 18:19:28 -07002907 return output->type;
2908
2909err:
2910 return output->type = EA_INVALID;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002911}
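
/*
 * Illustrative call site for process_ea() (hypothetical, not part of
 * the build): the caller passes the decoded operand, a scratch ea
 * structure and the register value destined for ModR/M.reg, and gets
 * back either a concrete EA type or EA_INVALID plus an error message.
 */
#if 0
static bool encode_mem_operand(insn *ins, operand *op, int bits,
                               int rfield, opflags_t rflags)
{
    ea output;
    const char *errmsg;

    memset(&output, 0, sizeof output);
    if (process_ea(op, &output, bits, rfield, rflags, ins, &errmsg)
        == EA_INVALID) {
        nasm_nonfatal("%s", errmsg);    /* report, let the caller recover */
        return false;
    }
    /* output.modrm, output.sib and output.bytes are now ready to emit */
    return true;
}
#endif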
2912
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002913static void add_asp(insn *ins, int addrbits)
H. Peter Anvineba20a72002-04-30 20:53:55 +00002914{
H. Peter Anvinc5b9ce02007-09-22 21:49:51 -07002915 int j, valid;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002916 int defdisp;
Keith Kaniosb7a89542007-04-12 02:40:54 +00002917
H. Peter Anvinc5b9ce02007-09-22 21:49:51 -07002918 valid = (addrbits == 64) ? 64|32 : 32|16;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002919
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002920 switch (ins->prefixes[PPS_ASIZE]) {
2921 case P_A16:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002922 valid &= 16;
2923 break;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002924 case P_A32:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002925 valid &= 32;
2926 break;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002927 case P_A64:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002928 valid &= 64;
2929 break;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002930 case P_ASP:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002931 valid &= (addrbits == 32) ? 16 : 32;
2932 break;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002933 default:
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002934 break;
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002935 }
2936
2937 for (j = 0; j < ins->operands; j++) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002938 if (is_class(MEMORY, ins->oprs[j].type)) {
2939 opflags_t i, b;
H. Peter Anvin70653092007-10-19 14:42:29 -07002940
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002941 /* Verify as Register */
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04002942 if (!is_register(ins->oprs[j].indexreg))
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002943 i = 0;
2944 else
2945 i = nasm_reg_flags[ins->oprs[j].indexreg];
H. Peter Anvin70653092007-10-19 14:42:29 -07002946
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002947 /* Verify as Register */
Cyrill Gorcunov2124b7b2010-07-25 01:16:33 +04002948 if (!is_register(ins->oprs[j].basereg))
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002949 b = 0;
2950 else
2951 b = nasm_reg_flags[ins->oprs[j].basereg];
H. Peter Anvin70653092007-10-19 14:42:29 -07002952
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002953 if (ins->oprs[j].scale == 0)
2954 i = 0;
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00002955
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002956 if (!i && !b) {
2957 int ds = ins->oprs[j].disp_size;
2958 if ((addrbits != 64 && ds > 8) ||
2959 (addrbits == 64 && ds == 16))
2960 valid &= ds;
2961 } else {
2962 if (!(REG16 & ~b))
2963 valid &= 16;
2964 if (!(REG32 & ~b))
2965 valid &= 32;
2966 if (!(REG64 & ~b))
2967 valid &= 64;
H. Peter Anvin70653092007-10-19 14:42:29 -07002968
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002969 if (!(REG16 & ~i))
2970 valid &= 16;
2971 if (!(REG32 & ~i))
2972 valid &= 32;
2973 if (!(REG64 & ~i))
2974 valid &= 64;
2975 }
2976 }
H. Peter Anvinc5b9ce02007-09-22 21:49:51 -07002977 }
2978
2979 if (valid & addrbits) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002980 ins->addr_size = addrbits;
H. Peter Anvinc5b9ce02007-09-22 21:49:51 -07002981 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002982 /* Add an address size prefix */
        ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002984 ins->addr_size = (addrbits == 32) ? 16 : 32;
H. Peter Anvin3df97a72007-05-30 03:25:21 +00002985 } else {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002986 /* Impossible... */
Cyrill Gorcunov00526d92018-11-25 01:32:22 +03002987 nasm_nonfatal("impossible combination of address sizes");
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002988 ins->addr_size = addrbits; /* Error recovery */
H. Peter Anvinde4b89b2007-10-01 15:41:25 -07002989 }
2990
2991 defdisp = ins->addr_size == 16 ? 16 : 32;
2992
2993 for (j = 0; j < ins->operands; j++) {
Cyrill Gorcunovd6f31242010-07-26 23:14:40 +04002994 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2995 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2996 /*
2997 * mem_offs sizes must match the address size; if not,
2998 * strip the MEM_OFFS bit and match only EA instructions
2999 */
3000 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);
3001 }
H. Peter Anvin3df97a72007-05-30 03:25:21 +00003002 }
H. Peter Anvinea6e34d2002-04-30 20:51:32 +00003003}
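
/*
 * Worked example of the address-size logic above (illustrative only):
 * in BITS 64, "mov eax, [ebx]" narrows the valid mask to 32, so an
 * effective address-size prefix (P_A32) is added and ins->addr_size
 * becomes 32; "mov eax, [bx]" narrows it to 16, which is unreachable
 * from 64-bit mode and triggers the "impossible combination of address
 * sizes" diagnostic.
 */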