/*---------------------------------------------------------------*/
/*--- begin                               host_nanomips_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2017-2018 RT-RK

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#ifndef __VEX_HOST_NANOMIPS_DEFS_H
#define __VEX_HOST_NANOMIPS_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"                   /* VexArch */
#include "host_generic_regs.h"        /* HReg */
#include "common_nanomips_defs.h"
/* --------- Registers. --------- */

#define ST_IN static inline

#define GPR(_enc, _ix) \
   mkHReg(False, HRcInt32, (_enc), (_ix))
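
/* GPR(enc, ix) builds a real (non-virtual) 32-bit integer register:
   'enc' is the hardware register number and 'ix' is the register's
   index in the real-register universe used by the allocator.  For
   example, GPR(16, 0) denotes r16, sitting at universe index 0. */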
ST_IN HReg hregNANOMIPS_GPR16(void) {
   return GPR(16,  0);
}
ST_IN HReg hregNANOMIPS_GPR17(void) {
   return GPR(17,  1);
}
ST_IN HReg hregNANOMIPS_GPR18(void) {
   return GPR(18,  2);
}
ST_IN HReg hregNANOMIPS_GPR19(void) {
   return GPR(19,  3);
}
ST_IN HReg hregNANOMIPS_GPR20(void) {
   return GPR(20,  4);
}
ST_IN HReg hregNANOMIPS_GPR21(void) {
   return GPR(21,  5);
}
ST_IN HReg hregNANOMIPS_GPR22(void) {
   return GPR(22,  6);
}

ST_IN HReg hregNANOMIPS_GPR12(void) {
   return GPR(12,  7);
}
ST_IN HReg hregNANOMIPS_GPR13(void) {
   return GPR(13,  8);
}
ST_IN HReg hregNANOMIPS_GPR14(void) {
   return GPR(14,  9);
}
ST_IN HReg hregNANOMIPS_GPR15(void) {
   return GPR(15, 10);
}
ST_IN HReg hregNANOMIPS_GPR24(void) {
   return GPR(24, 11);
}

ST_IN HReg hregNANOMIPS_GPR0(void) {
   return GPR( 0, 12);
}
ST_IN HReg hregNANOMIPS_GPR1(void) {
   return GPR( 1, 13);
}
ST_IN HReg hregNANOMIPS_GPR2(void) {
   return GPR( 2, 14);
}
ST_IN HReg hregNANOMIPS_GPR3(void) {
   return GPR( 3, 15);
}
ST_IN HReg hregNANOMIPS_GPR4(void) {
   return GPR( 4, 16);
}
ST_IN HReg hregNANOMIPS_GPR5(void) {
   return GPR( 5, 17);
}
ST_IN HReg hregNANOMIPS_GPR6(void) {
   return GPR( 6, 18);
}
ST_IN HReg hregNANOMIPS_GPR7(void) {
   return GPR( 7, 19);
}
ST_IN HReg hregNANOMIPS_GPR8(void) {
   return GPR( 8, 20);
}
ST_IN HReg hregNANOMIPS_GPR9(void) {
   return GPR( 9, 21);
}
ST_IN HReg hregNANOMIPS_GPR10(void) {
   return GPR(10, 22);
}
ST_IN HReg hregNANOMIPS_GPR11(void) {
   return GPR(11, 23);
}
ST_IN HReg hregNANOMIPS_GPR23(void) {
   return GPR(23, 24);
}
ST_IN HReg hregNANOMIPS_GPR25(void) {
   return GPR(25, 25);
}
ST_IN HReg hregNANOMIPS_GPR29(void) {
   return GPR(29, 26);
}
ST_IN HReg hregNANOMIPS_GPR31(void) {
   return GPR(31, 27);
}
#undef ST_IN
#undef GPR

#undef GuestStatePointer
#undef StackFramePointer
#undef StackPointer
#undef Zero

#define GuestStatePointer hregNANOMIPS_GPR23()
#define StackFramePointer hregNANOMIPS_GPR30()
#define StackPointer      hregNANOMIPS_GPR29()
#define Zero              hregNANOMIPS_GPR0()
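
/* Roles of the registers named above: r23 holds the guest state
   pointer, r30 is the frame pointer, r29 the stack pointer, and r0
   is the architectural zero register (hardwired to 0 on nanoMIPS). */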
/* Number of registers used for passing function-call arguments:
   a0, a1, a2, a3, a4, a5, a6, a7. */
# define NANOMIPS_N_REGPARMS 8
typedef enum {
   NMin_Imm,        /* Operation with word and imm (fake insn). */
   NMin_Unary,      /* Unary ops: clo, clz and nop. */
   NMin_Alu,        /* Binary ops: add/sub/and/or/xor/nor/mul/div. */
   NMin_Cmp,        /* Word compare (fake insn). */
   NMin_Call,       /* Call to address in register. */

   /* The following 5 insns are mandated by translation chaining. */
   NMin_XDirect,    /* Direct transfer to GA. */
   NMin_XIndir,     /* Indirect transfer to GA. */
   NMin_XAssisted,  /* Assisted transfer to GA. */
   NMin_EvCheck,    /* Event check. */
   NMin_ProfInc,    /* 64-bit profile counter increment. */

   NMin_Load,       /* Sign-extending load of an 8|16|32-bit value from mem. */
   NMin_Store,      /* Store an 8|16|32-bit value to mem. */
   NMin_Cas,        /* Compare and swap. */
   NMin_LoadL,      /* MIPS Load Linked Word - LL. */
   NMin_StoreC,     /* MIPS Store Conditional Word - SC. */
   NMin_MoveCond,   /* Move Conditional. */
} NANOMIPSInstrTag;
typedef enum {
   NMimm_INVALID = -1,   /* Invalid / unknown op */
   NMimm_SLL     = 0x00, /* Shift left */
   NMimm_SRL     = 0x02, /* Logical shift right */
   NMimm_LI      = 0x03, /* Load immediate */
   NMimm_SRA     = 0x04, /* Arithmetic shift right */
   NMimm_SGN     = 0x05, /* Sign extend from imm bits */
   NMimm_ORI     = 0x06, /* Logical or */
   NMimm_XORI    = 0x07, /* Logical xor */
   NMimm_ANDI    = 0x08, /* Logical and */
   NMimm_ROTX    = 0x09, /* Rotx */
} NANOMIPSImmOp;
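
/* Illustrative sketch (not part of this header; argument order
   assumed to be op, dst, src, imm):
      NANOMIPSInstr_Imm(NMimm_SLL, dst, src, 2)
   would compute 'dst = src << 2', and NMimm_SGN with imm == 8 would,
   going by the comment above, sign-extend the low 8 bits of src
   into dst. */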
typedef enum {
   NMun_CLO,
   NMun_CLZ,
   NMun_NOP,
} NANOMIPSUnaryOp;
typedef enum {
   NMalu_INVALID = -1,
   NMalu_SLL = NMimm_SLL,
   NMalu_SRL = NMimm_SRL,
   NMalu_SRA = NMimm_SRA,
   NMalu_OR  = NMimm_ORI,
   NMalu_XOR = NMimm_XORI,
   NMalu_AND = NMimm_ANDI,
   NMalu_ADD,
   NMalu_SUB,
   NMalu_SLT,
   NMalu_NOR,
   NMalu_MUL,
   NMalu_MULU,
   NMalu_MUH,
   NMalu_MUHU,
   NMalu_DIV,
   NMalu_DIVU,
   NMalu_MOD,
   NMalu_MODU,
} NANOMIPSAluOp;
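
/* Note the shift and logical members reuse the NANOMIPSImmOp
   encodings (e.g. NMalu_AND == (NANOMIPSAluOp)NMimm_ANDI), presumably
   so instruction selection can keep the same selector value when an
   operand turns out to be an immediate. */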
typedef enum {
   NMcc_INVALID, /* Invalid or unknown condition */
   NMcc_EQ,      /* equal */
   NMcc_NE,      /* not equal */

   NMcc_LTS,     /* signed less than */
   NMcc_LTU,     /* unsigned less than */

   NMcc_LES,     /* signed less than or equal */
   NMcc_LEU,     /* unsigned less than or equal */

   NMcc_AL,      /* always (unconditional) */
   NMcc_NV,      /* never (unconditional) */
} NANOMIPSCondCode;
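
/* Sketch of intended use (an assumption based on the Cmp comment
   below):
      NANOMIPSInstr_Cmp(NMcc_LTS, dst, srcL, srcR)
   would leave a truth value in dst, i.e. dst = (srcL <s srcR).  Only
   the LT/LE forms exist; greater-than tests can be obtained by
   swapping the operands. */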
typedef enum {
   NMMoveCond_movn /* Move Conditional on Not Zero */
} NANOMIPSMoveCondOp;
typedef struct {
   NANOMIPSInstrTag tag;
   union {
      struct {
         NANOMIPSImmOp op;
         HReg src;
         HReg dst;
         UInt imm;
      } Imm;
      struct {
         NANOMIPSAluOp op;
         HReg dst;
         HReg srcL;
         HReg srcR;
      } Alu;
      /* clo, clz, nop */
      struct {
         NANOMIPSUnaryOp op;
         HReg dst;
         HReg src;
      } Unary;
      /* Word compare.  Fake instruction, used for basic block
         ending. */
      struct {
         HReg dst;
         HReg srcL;
         HReg srcR;
         NANOMIPSCondCode cond;
      } Cmp;
      /* Pseudo-insn.  Call target (an absolute address), on given
         condition. */
      struct {
         Addr target;
         UInt argiregs;
         HReg guard;
         RetLoc rloc;       /* Where the return value will be */
      } Call;
      /* Update the guest PC value, then exit requesting to chain
         to it.  May be conditional. */
      struct {
         Addr dstGA;        /* Next guest address */
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
         HReg cond;         /* Condition */
         Bool toFastEP;     /* Chain to the slow or fast point? */
      } XDirect;
      /* Boring transfer to a guest address not known at JIT time.
         Not chainable.  May be conditional. */
      struct {
         HReg dstGA;
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
         HReg cond;         /* Condition */
      } XIndir;
      /* Assisted transfer to a guest address, most general case.
         Not chainable.  May be conditional. */
      struct {
         HReg dstGA;
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
         HReg cond;         /* Condition */
         IRJumpKind jk;     /* Jump kind */
      } XAssisted;
      struct {
         HReg r_amCounter;
         Int  offset_amCounter;
         HReg r_amFailAddr;
         Int  offset_amFailAddr;
      } EvCheck;
      struct {
         /* No fields.  The address of the counter to inc is
            installed later, post-translation, by patching it in,
            as it is not known at translation time. */
      } ProfInc;
      /* Sign-extending loads.  Dst size is host word size. */
      struct {
         UChar sz;          /* Must be 4 bytes for now. */
         HReg dst;          /* Destination register */
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
      } Load;
      struct {
         UChar sz;          /* Must be 4 bytes for now. */
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
         HReg src;          /* Source register */
      } Store;
      struct {
         UChar sz;          /* Must be 4 bytes for now. */
         HReg oldHi;
         HReg oldLo;
         HReg addr;
         HReg expdHi;
         HReg expdLo;
         HReg dataHi;
         HReg dataLo;
      } Cas;
      struct {
         UChar sz;          /* Must be 4 bytes for now. */
         HReg dst;          /* Destination register */
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
      } LoadL;
      struct {
         UChar sz;          /* Must be 4 bytes for now. */
         HReg addr;         /* Address register */
         Int  addr_offset;  /* Offset */
         HReg src;          /* Source register */
      } StoreC;
      /* Conditional move. */
      struct {
         NANOMIPSMoveCondOp op;
         HReg dst;
         HReg src;
         HReg cond;
      } MoveCond;
   } NMin;
} NANOMIPSInstr;
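
/* Consumers dispatch on 'tag' and then read only the matching union
   arm.  A minimal sketch:

      switch (i->tag) {
         case NMin_Alu:
            use(i->NMin.Alu.dst, i->NMin.Alu.srcL, i->NMin.Alu.srcR);
            break;
         case NMin_Load:
            use2(i->NMin.Load.dst, i->NMin.Load.addr,
                 i->NMin.Load.addr_offset);
            break;
         default:
            break;
      }

   ('use' and 'use2' are placeholders, not functions in this file.) */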
extern NANOMIPSInstr *NANOMIPSInstr_Imm(NANOMIPSImmOp, HReg, HReg, UInt);
extern NANOMIPSInstr *NANOMIPSInstr_Unary(NANOMIPSUnaryOp op, HReg dst,
                                          HReg src);
extern NANOMIPSInstr *NANOMIPSInstr_Alu(NANOMIPSAluOp, HReg, HReg, HReg);
extern NANOMIPSInstr *NANOMIPSInstr_Cmp(NANOMIPSCondCode, HReg, HReg, HReg);
extern NANOMIPSInstr *NANOMIPSInstr_Call(Addr, UInt, HReg, RetLoc);
extern NANOMIPSInstr *NANOMIPSInstr_XDirect(Addr64 dstGA, HReg, Int,
                                            HReg cond, Bool toFastEP);
extern NANOMIPSInstr *NANOMIPSInstr_XIndir(HReg dstGA, HReg, Int,
                                           HReg cond);
extern NANOMIPSInstr *NANOMIPSInstr_XAssisted(HReg dstGA, HReg, Int,
                                              HReg cond, IRJumpKind jk);
extern NANOMIPSInstr *NANOMIPSInstr_EvCheck(HReg, Int, HReg, Int);
extern NANOMIPSInstr *NANOMIPSInstr_ProfInc(void);
extern NANOMIPSInstr *NANOMIPSInstr_Load(UChar sz, HReg dst, HReg src,
                                         Int addr_offset);
extern NANOMIPSInstr *NANOMIPSInstr_Store(UChar sz, HReg dst, Int addr_offset,
                                          HReg src);
extern NANOMIPSInstr *NANOMIPSInstr_Cas(UChar sz, HReg oldLo, HReg oldHi,
                                        HReg addr, HReg expdLo, HReg expdHi,
                                        HReg dataLo, HReg dataHi);
extern NANOMIPSInstr *NANOMIPSInstr_LoadL(UChar sz, HReg dst, HReg src,
                                          Int addr_offset);
extern NANOMIPSInstr *NANOMIPSInstr_StoreC(UChar sz, HReg dst, Int addr_offset,
                                           HReg src);
extern NANOMIPSInstr *NANOMIPSInstr_MoveCond(NANOMIPSMoveCondOp op, HReg dst,
                                             HReg src, HReg cond);
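
/* Typical use during instruction selection (a sketch; addInstr and
   env belong to the isel environment in host_nanomips_isel.c, not to
   this header):

      addInstr(env, NANOMIPSInstr_Alu(NMalu_ADD, dst, srcL, srcR));
      addInstr(env, NANOMIPSInstr_Store(4, addr, 0, dst));

   i.e. 'dst = srcL + srcR' followed by a 32-bit store of dst to
   *(addr + 0). */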
extern void ppNANOMIPSInstr(const NANOMIPSInstr *);
extern UInt ppHRegNANOMIPS(HReg);

extern void getRegUsage_NANOMIPSInstr(HRegUsage *, const NANOMIPSInstr *);
extern void mapRegs_NANOMIPSInstr(HRegRemap *, NANOMIPSInstr *);
extern void genSpill_NANOMIPS( /*OUT*/ HInstr **i1, /*OUT*/ HInstr **i2,
                               HReg rreg, Int offset, Bool mode64);
extern void genReload_NANOMIPS( /*OUT*/ HInstr **i1, /*OUT*/ HInstr **i2,
                                HReg rreg, Int offset, Bool mode64);
extern NANOMIPSInstr* genMove_NANOMIPS(HReg from, HReg to);
extern HInstrArray *iselSB_NANOMIPS(const IRSB*,
                                    VexArch,
                                    const VexArchInfo*,
                                    const VexAbiInfo*,
                                    Int offs_Host_EvC_Counter,
                                    Int offs_Host_EvC_FailAddr,
                                    Bool chainingAllowed,
                                    Bool addProfInc,
                                    Addr max_ga);
extern Int emit_NANOMIPSInstr(/*MB_MOD*/ Bool* is_profInc,
                              UChar* buf, Int nbuf,
                              const NANOMIPSInstr* i,
                              Bool mode64,
                              VexEndness endness_host,
                              const void* disp_cp_chain_me_to_slowEP,
                              const void* disp_cp_chain_me_to_fastEP,
                              const void* disp_cp_xindir,
                              const void* disp_cp_xassisted);
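
/* Sketch of a call site (the actual driver lives in VEX's
   main_main.c; 'chain_slow' and friends are placeholders for the
   dispatcher entry points):

      Bool isProfInc = False;
      UChar buf[128];
      Int n = emit_NANOMIPSInstr(&isProfInc, buf, sizeof(buf), i,
                                 False, VexEndnessLE,
                                 chain_slow, chain_fast,
                                 xindir, xassisted);

   On return, buf[0 .. n-1] holds the encoded instruction bytes. */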
/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_NANOMIPS(void);
/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_NANOMIPS(VexEndness endness_host,
                                           void* place_to_chain,
                                           const void* disp_cp_chain_me_EXPECTED,
                                           const void* place_to_jump_to);

extern VexInvalRange unchainXDirect_NANOMIPS(VexEndness endness_host,
                                             void* place_to_unchain,
                                             const void* place_to_jump_to_EXPECTED,
                                             const void* disp_cp_chain_me);

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_NANOMIPS(VexEndness endness_host,
                                           void* place_to_patch,
                                           const ULong* location_of_counter);

extern const RRegUniverse* getRRegUniverse_NANOMIPS(Bool mode64);
#endif /* ndef __VEX_HOST_NANOMIPS_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end                                 host_nanomips_defs.h ---*/
/*---------------------------------------------------------------*/