target/hexagon/macros.h
/*
 * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HEXAGON_MACROS_H
#define HEXAGON_MACROS_H
#include "cpu.h"
#include "hex_regs.h"
#include "reg_fields.h"
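/*
 * These macros are expanded in two different contexts:
 *  - When QEMU_GENERATE is defined, they are used during TCG code
 *    generation and emit TCG ops.
 *  - Otherwise, they are used inside helper functions and operate
 *    directly on the CPU state (env).
 */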
#ifdef QEMU_GENERATE
#define READ_REG(dest, NUM) gen_read_reg(dest, NUM)
#define READ_PREG(dest, NUM) gen_read_preg(dest, (NUM))
#else
#define READ_REG(NUM) (env->gpr[(NUM)])
#define READ_PREG(NUM) (env->pred[NUM])

#define WRITE_RREG(NUM, VAL) log_reg_write(env, NUM, VAL, slot)
#define WRITE_PREG(NUM, VAL) log_pred_write(env, NUM, VAL)
#endif
#define PCALIGN 4
#define PCALIGN_MASK (PCALIGN - 1)

#define GET_FIELD(FIELD, REGIN) \
    fEXTRACTU_BITS(REGIN, reg_field_info[FIELD].width, \
                   reg_field_info[FIELD].offset)
#ifdef QEMU_GENERATE
#define GET_USR_FIELD(FIELD, DST) \
    tcg_gen_extract_tl(DST, hex_gpr[HEX_REG_USR], \
                       reg_field_info[FIELD].offset, \
                       reg_field_info[FIELD].width)

#define TYPE_INT(X) __builtin_types_compatible_p(typeof(X), int)
#define TYPE_TCGV(X) __builtin_types_compatible_p(typeof(X), TCGv)
#define TYPE_TCGV_I64(X) __builtin_types_compatible_p(typeof(X), TCGv_i64)
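/*
 * SET_USR_FIELD (and the MEM_STORE macros below) accept either a C
 * integer or a TCGv value, so __builtin_choose_expr and the TYPE_*
 * predicates above select the matching generator function at compile
 * time based on the argument's type.
 */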
#define SET_USR_FIELD_FUNC(X) \
    __builtin_choose_expr(TYPE_INT(X), \
                          gen_set_usr_fieldi, \
                          __builtin_choose_expr(TYPE_TCGV(X), \
                                                gen_set_usr_field, (void)0))
#define SET_USR_FIELD(FIELD, VAL) \
    SET_USR_FIELD_FUNC(VAL)(FIELD, VAL)
#else
#define GET_USR_FIELD(FIELD) \
    fEXTRACTU_BITS(env->gpr[HEX_REG_USR], reg_field_info[FIELD].width, \
                   reg_field_info[FIELD].offset)

#define SET_USR_FIELD(FIELD, VAL) \
    fINSERT_BITS(env->gpr[HEX_REG_USR], reg_field_info[FIELD].width, \
                 reg_field_info[FIELD].offset, (VAL))
#endif
#ifdef QEMU_GENERATE
/*
 * Section 5.5 of the Hexagon V67 Programmer's Reference Manual
 *
 * Slot 1 store with slot 0 load
 * A slot 1 store operation with a slot 0 load operation can appear in a packet.
 * The packet attribute :mem_noshuf inhibits the instruction reordering that
 * would otherwise be done by the assembler. For example:
 *     {
 *         memw(R5) = R2 // slot 1 store
 *         R3 = memh(R6) // slot 0 load
 *     }:mem_noshuf
 * Unlike most packetized operations, these memory operations are not executed
 * in parallel (Section 3.3.1). Instead, the store instruction in Slot 1
 * effectively executes first, followed by the load instruction in Slot 0. If
 * the addresses of the two operations are overlapping, the load will receive
 * the newly stored data. This feature is supported in processor versions
 * V65 or greater.
 *
 * For qemu, we look for a load in slot 0 when there is a store in slot 1
 * in the same packet. When we see this, we call a helper that merges the
 * bytes from the store buffer with the value loaded from memory.
 */
#define CHECK_NOSHUF \
    do { \
        if (insn->slot == 0 && pkt->pkt_has_store_s1) { \
            process_store(ctx, pkt, 1); \
        } \
    } while (0)
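/*
 * Memory loads: the numeric suffix is the access size in bytes and the
 * s/u suffix selects sign- vs zero-extension.  Each load applies
 * CHECK_NOSHUF first so that a slot 1 store in a :mem_noshuf packet is
 * visible to the slot 0 load.
 */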
#define MEM_LOAD1s(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \
    } while (0)
#define MEM_LOAD1u(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \
    } while (0)
#define MEM_LOAD2s(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \
    } while (0)
#define MEM_LOAD2u(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \
    } while (0)
#define MEM_LOAD4s(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
    } while (0)
#define MEM_LOAD4u(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
    } while (0)
#define MEM_LOAD8u(DST, VA) \
    do { \
        CHECK_NOSHUF; \
        tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \
    } while (0)
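/*
 * Memory stores: the DATA argument may be either an immediate or a
 * TCGv/TCGv_i64, so __builtin_choose_expr selects the matching
 * gen_store function.  The SLOT argument identifies which slot's store
 * buffer entry records the store until the packet commits.
 */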
#define MEM_STORE1_FUNC(X) \
    __builtin_choose_expr(TYPE_INT(X), \
                          gen_store1i, \
                          __builtin_choose_expr(TYPE_TCGV(X), \
                                                gen_store1, (void)0))
#define MEM_STORE1(VA, DATA, SLOT) \
    MEM_STORE1_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)

#define MEM_STORE2_FUNC(X) \
    __builtin_choose_expr(TYPE_INT(X), \
                          gen_store2i, \
                          __builtin_choose_expr(TYPE_TCGV(X), \
                                                gen_store2, (void)0))
#define MEM_STORE2(VA, DATA, SLOT) \
    MEM_STORE2_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)

#define MEM_STORE4_FUNC(X) \
    __builtin_choose_expr(TYPE_INT(X), \
                          gen_store4i, \
                          __builtin_choose_expr(TYPE_TCGV(X), \
                                                gen_store4, (void)0))
#define MEM_STORE4(VA, DATA, SLOT) \
    MEM_STORE4_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)

#define MEM_STORE8_FUNC(X) \
    __builtin_choose_expr(TYPE_INT(X), \
                          gen_store8i, \
                          __builtin_choose_expr(TYPE_TCGV_I64(X), \
                                                gen_store8, (void)0))
#define MEM_STORE8(VA, DATA, SLOT) \
    MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)
#else
#define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, slot, VA))
#define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, slot, VA))
#define MEM_LOAD2s(VA) ((int16_t)mem_load2(env, slot, VA))
#define MEM_LOAD2u(VA) ((uint16_t)mem_load2(env, slot, VA))
#define MEM_LOAD4s(VA) ((int32_t)mem_load4(env, slot, VA))
#define MEM_LOAD4u(VA) ((uint32_t)mem_load4(env, slot, VA))
#define MEM_LOAD8s(VA) ((int64_t)mem_load8(env, slot, VA))
#define MEM_LOAD8u(VA) ((uint64_t)mem_load8(env, slot, VA))

#define MEM_STORE1(VA, DATA, SLOT) log_store32(env, VA, DATA, 1, SLOT)
#define MEM_STORE2(VA, DATA, SLOT) log_store32(env, VA, DATA, 2, SLOT)
#define MEM_STORE4(VA, DATA, SLOT) log_store32(env, VA, DATA, 4, SLOT)
#define MEM_STORE8(VA, DATA, SLOT) log_store64(env, VA, DATA, 8, SLOT)
#endif
#define CANCEL cancel_slot(env, slot)

#define LOAD_CANCEL(EA) do { CANCEL; } while (0)
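/*
 * When a predicated instruction is not taken, its slot is "cancelled":
 * the corresponding bit in slot_cancelled is set so that the slot's
 * effects are dropped when the packet commits.
 */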
#ifdef QEMU_GENERATE
static inline void gen_pred_cancel(TCGv pred, int slot_num)
{
    TCGv slot_mask = tcg_const_tl(1 << slot_num);
    TCGv tmp = tcg_temp_new();
    TCGv zero = tcg_const_tl(0);
    TCGv one = tcg_const_tl(1);
    tcg_gen_or_tl(slot_mask, hex_slot_cancelled, slot_mask);
    tcg_gen_andi_tl(tmp, pred, 1);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero,
                       slot_mask, hex_slot_cancelled);
    tcg_temp_free(slot_mask);
    tcg_temp_free(tmp);
    tcg_temp_free(zero);
    tcg_temp_free(one);
}

#define PRED_LOAD_CANCEL(PRED, EA) \
    gen_pred_cancel(PRED, insn->is_endloop ? 4 : insn->slot)
#endif
#define STORE_CANCEL(EA) { env->slot_cancelled |= (1 << slot); }

#define fMAX(A, B) (((A) > (B)) ? (A) : (B))

#define fMIN(A, B) (((A) < (B)) ? (A) : (B))

#define fABS(A) (((A) < 0) ? (-(A)) : (A))
#define fINSERT_BITS(REG, WIDTH, OFFSET, INVAL) \
    REG = ((WIDTH) ? deposit64(REG, (OFFSET), (WIDTH), (INVAL)) : REG)
#define fEXTRACTU_BITS(INREG, WIDTH, OFFSET) \
    ((WIDTH) ? extract64((INREG), (OFFSET), (WIDTH)) : 0LL)
#define fEXTRACTU_BIDIR(INREG, WIDTH, OFFSET) \
    (fZXTN(WIDTH, 32, fBIDIR_LSHIFTR((INREG), (OFFSET), 4_8)))
#define fEXTRACTU_RANGE(INREG, HIBIT, LOWBIT) \
    (((HIBIT) - (LOWBIT) + 1) ? \
        extract64((INREG), (LOWBIT), ((HIBIT) - (LOWBIT) + 1)) : \
        0LL)

#define f8BITSOF(VAL) ((VAL) ? 0xff : 0x00)
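/*
 * The fLSB* macros read the least significant bit of a predicate value:
 * fLSBOLD reads the committed value, fLSBNEW* read the value produced
 * earlier in the same packet, and the *NOT variants negate the result.
 */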
#ifdef QEMU_GENERATE
#define fLSBOLD(VAL) tcg_gen_andi_tl(LSB, (VAL), 1)
#else
#define fLSBOLD(VAL) ((VAL) & 1)
#endif

#ifdef QEMU_GENERATE
#define fLSBNEW(PVAL) tcg_gen_mov_tl(LSB, (PVAL))
#define fLSBNEW0 tcg_gen_mov_tl(LSB, hex_new_pred_value[0])
#define fLSBNEW1 tcg_gen_mov_tl(LSB, hex_new_pred_value[1])
#else
#define fLSBNEW(PVAL) (PVAL)
#define fLSBNEW0 new_pred_value(env, 0)
#define fLSBNEW1 new_pred_value(env, 1)
#endif
#ifdef QEMU_GENERATE
static inline void gen_logical_not(TCGv dest, TCGv src)
{
    TCGv one = tcg_const_tl(1);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, dest, src, zero, zero, one);

    tcg_temp_free(one);
    tcg_temp_free(zero);
}
#define fLSBOLDNOT(VAL) \
    do { \
        tcg_gen_andi_tl(LSB, (VAL), 1); \
        tcg_gen_xori_tl(LSB, LSB, 1); \
    } while (0)
#define fLSBNEWNOT(PNUM) \
    gen_logical_not(LSB, (PNUM))
#else
#define fLSBNEWNOT(PNUM) (!fLSBNEW(PNUM))
#define fLSBOLDNOT(VAL) (!fLSBOLD(VAL))
#define fLSBNEW0NOT (!fLSBNEW0)
#define fLSBNEW1NOT (!fLSBNEW1)
#endif
#define fNEWREG(VAL) ((int32_t)(VAL))

#define fNEWREG_ST(VAL) (VAL)

#define fSATUVALN(N, VAL) \
    ({ \
        fSET_OVERFLOW(); \
        ((VAL) < 0) ? 0 : ((1LL << (N)) - 1); \
    })
#define fSATVALN(N, VAL) \
    ({ \
        fSET_OVERFLOW(); \
        ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \
    })
#define fZXTN(N, M, VAL) (((N) != 0) ? extract64((VAL), 0, (N)) : 0LL)
#define fSXTN(N, M, VAL) (((N) != 0) ? sextract64((VAL), 0, (N)) : 0LL)
#define fSATN(N, VAL) \
    ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATVALN(N, VAL))
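/*
 * fADDSAT64: 64-bit saturating add.  If the operands have opposite signs
 * the sum cannot overflow.  If they have the same sign and the sum's sign
 * differs from it, the result saturates to INT64_MAX or INT64_MIN and the
 * USR overflow bit is set.
 */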
#define fADDSAT64(DST, A, B) \
    do { \
        uint64_t __a = fCAST8u(A); \
        uint64_t __b = fCAST8u(B); \
        uint64_t __sum = __a + __b; \
        uint64_t __xor = __a ^ __b; \
        const uint64_t __mask = 0x8000000000000000ULL; \
        if (__xor & __mask) { \
            DST = __sum; \
        } \
        else if ((__a ^ __sum) & __mask) { \
            if (__sum & __mask) { \
                DST = 0x7FFFFFFFFFFFFFFFLL; \
                fSET_OVERFLOW(); \
            } else { \
                DST = 0x8000000000000000LL; \
                fSET_OVERFLOW(); \
            } \
        } else { \
            DST = __sum; \
        } \
    } while (0)
#define fSATUN(N, VAL) \
    ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATUVALN(N, VAL))
#define fSATH(VAL) (fSATN(16, VAL))
#define fSATUH(VAL) (fSATUN(16, VAL))
#define fSATUB(VAL) (fSATUN(8, VAL))
#define fSATB(VAL) (fSATN(8, VAL))
#define fIMMEXT(IMM) (IMM = IMM)
#define fMUST_IMMEXT(IMM) fIMMEXT(IMM)

#define fPCALIGN(IMM) IMM = (IMM & ~PCALIGN_MASK)
#ifdef QEMU_GENERATE
static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift)
{
    /*
     * Section 2.2.4 of the Hexagon V67 Programmer's Reference Manual
     *
     * The "I" value from a modifier register is divided into two pieces
     *     LSB         bits 23:17
     *     MSB         bits 31:28
     * The value is signed
     *
     * At the end we shift the result according to the shift argument
     */
    TCGv msb = tcg_temp_new();
    TCGv lsb = tcg_temp_new();

    tcg_gen_extract_tl(lsb, val, 17, 7);
    tcg_gen_sari_tl(msb, val, 21);
    tcg_gen_deposit_tl(result, msb, lsb, 0, 7);

    tcg_gen_shli_tl(result, result, shift);

    tcg_temp_free(msb);
    tcg_temp_free(lsb);

    return result;
}
#define fREAD_IREG(VAL, SHIFT) gen_read_ireg(ireg, (VAL), (SHIFT))
#else
#define fREAD_IREG(VAL) \
    (fSXTN(11, 64, (((VAL) & 0xf0000000) >> 21) | ((VAL >> 17) & 0x7f)))
#endif
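/*
 * For illustration: with modifier-register bits 31:28 = 0xA and
 * bits 23:17 = 0x05, the 11-bit I value is 0b1010_0000101 = -763.
 * Both variants above recover this value (the TCG version then applies
 * the shift argument).
 */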
#define fREAD_LR() (READ_REG(HEX_REG_LR))

#define fWRITE_LR(A) WRITE_RREG(HEX_REG_LR, A)
#define fWRITE_FP(A) WRITE_RREG(HEX_REG_FP, A)
#define fWRITE_SP(A) WRITE_RREG(HEX_REG_SP, A)

#define fREAD_SP() (READ_REG(HEX_REG_SP))
#define fREAD_LC0 (READ_REG(HEX_REG_LC0))
#define fREAD_LC1 (READ_REG(HEX_REG_LC1))
#define fREAD_SA0 (READ_REG(HEX_REG_SA0))
#define fREAD_SA1 (READ_REG(HEX_REG_SA1))
#define fREAD_FP() (READ_REG(HEX_REG_FP))
#ifdef FIXME
/* Figure out how to get insn->extension_valid to helper */
#define fREAD_GP() \
    (insn->extension_valid ? 0 : READ_REG(HEX_REG_GP))
#else
#define fREAD_GP() READ_REG(HEX_REG_GP)
#endif
#define fREAD_PC() (READ_REG(HEX_REG_PC))

#define fREAD_NPC() (env->next_PC & (0xfffffffe))

#define fREAD_P0() (READ_PREG(0))
#define fREAD_P3() (READ_PREG(3))
#define fCHECK_PCALIGN(A)

#define fWRITE_NPC(A) write_new_pc(env, A)

#define fBRANCH(LOC, TYPE) fWRITE_NPC(LOC)
#define fJUMPR(REGNO, TARGET, TYPE) fBRANCH(TARGET, COF_TYPE_JUMPR)
#define fHINTJR(TARGET) { /* Not modelled in qemu */ }
#define fCALL(A) \
    do { \
        fWRITE_LR(fREAD_NPC()); \
        fBRANCH(A, COF_TYPE_CALL); \
    } while (0)
#define fCALLR(A) \
    do { \
        fWRITE_LR(fREAD_NPC()); \
        fBRANCH(A, COF_TYPE_CALLR); \
    } while (0)
#define fWRITE_LOOP_REGS0(START, COUNT) \
    do { \
        WRITE_RREG(HEX_REG_LC0, COUNT); \
        WRITE_RREG(HEX_REG_SA0, START); \
    } while (0)
#define fWRITE_LOOP_REGS1(START, COUNT) \
    do { \
        WRITE_RREG(HEX_REG_LC1, COUNT); \
        WRITE_RREG(HEX_REG_SA1, START); \
    } while (0)
#define fWRITE_LC0(VAL) WRITE_RREG(HEX_REG_LC0, VAL)
#define fWRITE_LC1(VAL) WRITE_RREG(HEX_REG_LC1, VAL)

#define fSET_OVERFLOW() SET_USR_FIELD(USR_OVF, 1)
#define fSET_LPCFG(VAL) SET_USR_FIELD(USR_LPCFG, (VAL))
#define fGET_LPCFG (GET_USR_FIELD(USR_LPCFG))
#define fWRITE_P0(VAL) WRITE_PREG(0, VAL)
#define fWRITE_P1(VAL) WRITE_PREG(1, VAL)
#define fWRITE_P2(VAL) WRITE_PREG(2, VAL)
#define fWRITE_P3(VAL) WRITE_PREG(3, VAL)
#define fPART1(WORK) if (part1) { WORK; return; }
#define fCAST4u(A) ((uint32_t)(A))
#define fCAST4s(A) ((int32_t)(A))
#define fCAST8u(A) ((uint64_t)(A))
#define fCAST8s(A) ((int64_t)(A))
#define fCAST4_4s(A) ((int32_t)(A))
#define fCAST4_4u(A) ((uint32_t)(A))
#define fCAST4_8s(A) ((int64_t)((int32_t)(A)))
#define fCAST4_8u(A) ((uint64_t)((uint32_t)(A)))
#define fCAST8_8s(A) ((int64_t)(A))
#define fCAST8_8u(A) ((uint64_t)(A))
#define fCAST2_8s(A) ((int64_t)((int16_t)(A)))
#define fCAST2_8u(A) ((uint64_t)((uint16_t)(A)))
#define fZE8_16(A) ((int16_t)((uint8_t)(A)))
#define fSE8_16(A) ((int16_t)((int8_t)(A)))
#define fSE16_32(A) ((int32_t)((int16_t)(A)))
#define fZE16_32(A) ((uint32_t)((uint16_t)(A)))
#define fSE32_64(A) ((int64_t)((int32_t)(A)))
#define fZE32_64(A) ((uint64_t)((uint32_t)(A)))
#define fSE8_32(A) ((int32_t)((int8_t)(A)))
#define fZE8_32(A) ((int32_t)((uint8_t)(A)))
#define fMPY8UU(A, B) (int)(fZE8_16(A) * fZE8_16(B))
#define fMPY8US(A, B) (int)(fZE8_16(A) * fSE8_16(B))
#define fMPY8SU(A, B) (int)(fSE8_16(A) * fZE8_16(B))
#define fMPY8SS(A, B) (int)((short)(A) * (short)(B))
#define fMPY16SS(A, B) fSE32_64(fSE16_32(A) * fSE16_32(B))
#define fMPY16UU(A, B) fZE32_64(fZE16_32(A) * fZE16_32(B))
#define fMPY16SU(A, B) fSE32_64(fSE16_32(A) * fZE16_32(B))
#define fMPY16US(A, B) fMPY16SU(B, A)
#define fMPY32SS(A, B) (fSE32_64(A) * fSE32_64(B))
#define fMPY32UU(A, B) (fZE32_64(A) * fZE32_64(B))
#define fMPY32SU(A, B) (fSE32_64(A) * fZE32_64(B))
#define fMPY3216SS(A, B) (fSE32_64(A) * fSXTN(16, 64, B))
#define fMPY3216SU(A, B) (fSE32_64(A) * fZXTN(16, 64, B))
#define fROUND(A) (A + 0x8000)
#define fCLIP(DST, SRC, U) \
    do { \
        int32_t maxv = (1 << U) - 1; \
        int32_t minv = -(1 << U); \
        DST = fMIN(maxv, fMAX(SRC, minv)); \
    } while (0)
#define fCRND(A) ((((A) & 0x3) == 0x3) ? ((A) + 1) : ((A)))
#define fRNDN(A, N) ((((N) == 0) ? (A) : (((fSE32_64(A)) + (1 << ((N) - 1))))))
#define fCRNDN(A, N) (conv_round(A, N))
#define fADD128(A, B) (int128_add(A, B))
#define fSUB128(A, B) (int128_sub(A, B))
#define fSHIFTR128(A, B) (int128_rshift(A, B))
#define fSHIFTL128(A, B) (int128_lshift(A, B))
#define fAND128(A, B) (int128_and(A, B))
#define fCAST8S_16S(A) (int128_exts64(A))
#define fCAST16S_8S(A) (int128_getlo(A))
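/*
 * Effective-address macros: fEA_* compute the effective address for the
 * various addressing modes (register plus immediate, register plus scaled
 * register, GP-relative, bit-reversed), and fPM_* perform the post-modify
 * register updates (immediate, M-register, and circular via CS0/CS1).
 */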
#define fEA_RI(REG, IMM) \
    do { \
        EA = REG + IMM; \
    } while (0)
#define fEA_RRs(REG, REG2, SCALE) \
    do { \
        EA = REG + (REG2 << SCALE); \
    } while (0)
#define fEA_IRs(IMM, REG, SCALE) \
    do { \
        EA = IMM + (REG << SCALE); \
    } while (0)
#ifdef QEMU_GENERATE
#define fEA_IMM(IMM) tcg_gen_movi_tl(EA, IMM)
#define fEA_REG(REG) tcg_gen_mov_tl(EA, REG)
#define fEA_BREVR(REG) gen_helper_fbrev(EA, REG)
#define fPM_I(REG, IMM) tcg_gen_addi_tl(REG, REG, IMM)
#define fPM_M(REG, MVAL) tcg_gen_add_tl(REG, REG, MVAL)
#define fPM_CIRI(REG, IMM, MVAL) \
    do { \
        TCGv tcgv_siV = tcg_const_tl(siV); \
        gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, \
                            hex_gpr[HEX_REG_CS0 + MuN]); \
        tcg_temp_free(tcgv_siV); \
    } while (0)
#else
#define fEA_IMM(IMM) do { EA = (IMM); } while (0)
#define fEA_REG(REG) do { EA = (REG); } while (0)
#define fEA_GPI(IMM) do { EA = (fREAD_GP() + (IMM)); } while (0)
#define fPM_I(REG, IMM) do { REG = REG + (IMM); } while (0)
#define fPM_M(REG, MVAL) do { REG = REG + (MVAL); } while (0)
#endif
#define fSCALE(N, A) (((int64_t)(A)) << N)
#define fSATW(A) fSATN(32, ((long long)A))
#define fSAT(A) fSATN(32, (A))
#define fSAT_ORIG_SHL(A, ORIG_REG) \
    ((((int32_t)((fSAT(A)) ^ ((int32_t)(ORIG_REG)))) < 0) \
        ? fSATVALN(32, ((int32_t)(ORIG_REG))) \
        : ((((ORIG_REG) > 0) && ((A) == 0)) ? fSATVALN(32, (ORIG_REG)) \
                                            : fSAT(A)))
#define fPASS(A) A
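/*
 * Bidirectional shifts: a negative shift amount shifts in the opposite
 * direction by that magnitude (done in two steps to avoid an undefined
 * shift by the full operand width).  The _SAT variants saturate, and set
 * USR.OVF, when a left shift overflows.
 */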
#define fBIDIR_SHIFTL(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) >> ((-(SHAMT)) - 1)) >> 1) \
                   : (fCAST##REGSTYPE(SRC) << (SHAMT)))
#define fBIDIR_ASHIFTL(SRC, SHAMT, REGSTYPE) \
    fBIDIR_SHIFTL(SRC, SHAMT, REGSTYPE##s)
#define fBIDIR_LSHIFTL(SRC, SHAMT, REGSTYPE) \
    fBIDIR_SHIFTL(SRC, SHAMT, REGSTYPE##u)
#define fBIDIR_ASHIFTL_SAT(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) < 0) ? ((fCAST##REGSTYPE##s(SRC) >> ((-(SHAMT)) - 1)) >> 1) \
                   : fSAT_ORIG_SHL(fCAST##REGSTYPE##s(SRC) << (SHAMT), (SRC)))
#define fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) << ((-(SHAMT)) - 1)) << 1) \
                   : (fCAST##REGSTYPE(SRC) >> (SHAMT)))
#define fBIDIR_ASHIFTR(SRC, SHAMT, REGSTYPE) \
    fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE##s)
#define fBIDIR_LSHIFTR(SRC, SHAMT, REGSTYPE) \
    fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE##u)
#define fBIDIR_ASHIFTR_SAT(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) < 0) ? fSAT_ORIG_SHL((fCAST##REGSTYPE##s(SRC) \
                        << ((-(SHAMT)) - 1)) << 1, (SRC)) \
                   : (fCAST##REGSTYPE##s(SRC) >> (SHAMT)))
#define fASHIFTR(SRC, SHAMT, REGSTYPE) (fCAST##REGSTYPE##s(SRC) >> (SHAMT))
#define fLSHIFTR(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##u(SRC) >> (SHAMT)))
#define fROTL(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) == 0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) << (SHAMT)) | \
                               ((fCAST##REGSTYPE##u(SRC) >> \
                                  ((sizeof(SRC) * 8) - (SHAMT))))))
#define fROTR(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) == 0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) >> (SHAMT)) | \
                               ((fCAST##REGSTYPE##u(SRC) << \
                                  ((sizeof(SRC) * 8) - (SHAMT))))))
#define fASHIFTL(SRC, SHAMT, REGSTYPE) \
    (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##s(SRC) << (SHAMT)))
#ifdef QEMU_GENERATE
#define fLOAD(NUM, SIZE, SIGN, EA, DST) MEM_LOAD##SIZE##SIGN(DST, EA)
#else
#define fLOAD(NUM, SIZE, SIGN, EA, DST) \
    DST = (size##SIZE##SIGN##_t)MEM_LOAD##SIZE##SIGN(EA)
#endif

#define fMEMOP(NUM, SIZE, SIGN, EA, FNTYPE, VALUE)

#define fGET_FRAMEKEY() READ_REG(HEX_REG_FRAMEKEY)
#define fFRAME_SCRAMBLE(VAL) ((VAL) ^ (fCAST8u(fGET_FRAMEKEY()) << 32))
#define fFRAME_UNSCRAMBLE(VAL) fFRAME_SCRAMBLE(VAL)

#ifdef CONFIG_USER_ONLY
#define fFRAMECHECK(ADDR, EA) do { } while (0) /* Not modelled in linux-user */
#else
/* System mode not implemented yet */
#define fFRAMECHECK(ADDR, EA) g_assert_not_reached();
#endif

#ifdef QEMU_GENERATE
#define fLOAD_LOCKED(NUM, SIZE, SIGN, EA, DST) \
    gen_load_locked##SIZE##SIGN(DST, EA, ctx->mem_idx);
#endif

#ifdef QEMU_GENERATE
#define fSTORE(NUM, SIZE, EA, SRC) MEM_STORE##SIZE(EA, SRC, insn->slot)
#else
#define fSTORE(NUM, SIZE, EA, SRC) MEM_STORE##SIZE(EA, SRC, slot)
#endif

#ifdef QEMU_GENERATE
#define fSTORE_LOCKED(NUM, SIZE, EA, SRC, PRED) \
    gen_store_conditional##SIZE(env, ctx, PdN, PRED, EA, SRC);
#endif
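/*
 * Sub-word accessors: fGETBYTE/fGETHALF/fGETWORD extract byte, halfword,
 * or word N of a register or register pair (signed or unsigned), and the
 * matching fSET* macros insert a value at that position.
 */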
#ifdef QEMU_GENERATE
#define GETBYTE_FUNC(X) \
    __builtin_choose_expr(TYPE_TCGV(X), \
                          gen_get_byte, \
                          __builtin_choose_expr(TYPE_TCGV_I64(X), \
                                                gen_get_byte_i64, (void)0))
#define fGETBYTE(N, SRC) GETBYTE_FUNC(SRC)(BYTE, N, SRC, true)
#define fGETUBYTE(N, SRC) GETBYTE_FUNC(SRC)(BYTE, N, SRC, false)
#else
#define fGETBYTE(N, SRC) ((int8_t)((SRC >> ((N) * 8)) & 0xff))
#define fGETUBYTE(N, SRC) ((uint8_t)((SRC >> ((N) * 8)) & 0xff))
#endif

#define fSETBYTE(N, DST, VAL) \
    do { \
        DST = (DST & ~(0x0ffLL << ((N) * 8))) | \
              (((uint64_t)((VAL) & 0x0ffLL)) << ((N) * 8)); \
    } while (0)
#ifdef QEMU_GENERATE
#define fGETHALF(N, SRC) gen_get_half(HALF, N, SRC, true)
#define fGETUHALF(N, SRC) gen_get_half(HALF, N, SRC, false)
#else
#define fGETHALF(N, SRC) ((int16_t)((SRC >> ((N) * 16)) & 0xffff))
#define fGETUHALF(N, SRC) ((uint16_t)((SRC >> ((N) * 16)) & 0xffff))
#endif
#define fSETHALF(N, DST, VAL) \
    do { \
        DST = (DST & ~(0x0ffffLL << ((N) * 16))) | \
              (((uint64_t)((VAL) & 0x0ffff)) << ((N) * 16)); \
    } while (0)
#define fSETHALFw fSETHALF
#define fSETHALFd fSETHALF

#define fGETWORD(N, SRC) \
    ((int64_t)((int32_t)((SRC >> ((N) * 32)) & 0x0ffffffffLL)))
#define fGETUWORD(N, SRC) \
    ((uint64_t)((uint32_t)((SRC >> ((N) * 32)) & 0x0ffffffffLL)))

#define fSETWORD(N, DST, VAL) \
    do { \
        DST = (DST & ~(0x0ffffffffLL << ((N) * 32))) | \
              (((VAL) & 0x0ffffffffLL) << ((N) * 32)); \
    } while (0)

#define fSETBIT(N, DST, VAL) \
    do { \
        DST = (DST & ~(1ULL << (N))) | (((uint64_t)(VAL)) << (N)); \
    } while (0)

#define fGETBIT(N, SRC) (((SRC) >> N) & 1)
#define fSETBITS(HI, LO, DST, VAL) \
    do { \
        int j; \
        for (j = LO; j <= HI; j++) { \
            fSETBIT(j, DST, VAL); \
        } \
    } while (0)
#define fCOUNTONES_4(VAL) ctpop32(VAL)
#define fCOUNTONES_8(VAL) ctpop64(VAL)
#define fBREV_8(VAL) revbit64(VAL)
#define fBREV_4(VAL) revbit32(VAL)
#define fCL1_8(VAL) clo64(VAL)
#define fCL1_4(VAL) clo32(VAL)
#define fINTERLEAVE(ODD, EVEN) interleave(ODD, EVEN)
#define fDEINTERLEAVE(MIXED) deinterleave(MIXED)
#define fHIDE(A) A
#define fCONSTLL(A) A##LL
#define fECHO(A) (A)

#define fTRAP(TRAPTYPE, IMM) helper_raise_exception(env, HEX_EXCP_TRAP0)
#define fPAUSE(IMM)

#define fALIGN_REG_FIELD_VALUE(FIELD, VAL) \
    ((VAL) << reg_field_info[FIELD].offset)
#define fGET_REG_FIELD_MASK(FIELD) \
    (((1 << reg_field_info[FIELD].width) - 1) << reg_field_info[FIELD].offset)
#define fREAD_REG_FIELD(REG, FIELD) \
    fEXTRACTU_BITS(env->gpr[HEX_REG_##REG], \
                   reg_field_info[FIELD].width, \
                   reg_field_info[FIELD].offset)
#define fGET_FIELD(VAL, FIELD)
#define fSET_FIELD(VAL, FIELD, NEWVAL)
#define fBARRIER()
#define fSYNCH()
#define fISYNC()
#define fDCFETCH(REG) \
    do { (void)REG; } while (0) /* Nothing to do in qemu */
#define fICINVA(REG) \
    do { (void)REG; } while (0) /* Nothing to do in qemu */
#define fL2FETCH(ADDR, HEIGHT, WIDTH, STRIDE, FLAGS)
#define fDCCLEANA(REG) \
    do { (void)REG; } while (0) /* Nothing to do in qemu */
#define fDCCLEANINVA(REG) \
    do { (void)REG; } while (0) /* Nothing to do in qemu */

#define fDCZEROA(REG) do { env->dczero_addr = (REG); } while (0)

#define fBRANCH_SPECULATE_STALL(DOTNEWVAL, JUMP_COND, SPEC_DIR, HINTBITNUM, \
                                STRBITNUM) /* Nothing */

#endif