tcg-hppa: Fix softmmu loads and stores.
[qemu/aliguori-queue.git] / tcg/hppa/tcg-target.c
blob 2f3b7708a1ab82ab412622de2c5ae1567ebbad90
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
28 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
29 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
30 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
32 #endif
34 /* This is an 8 byte temp slot in the stack frame. */
35 #define STACK_TEMP_OFS -16
37 #ifdef CONFIG_USE_GUEST_BASE
38 #define TCG_GUEST_BASE_REG TCG_REG_R16
39 #else
40 #define TCG_GUEST_BASE_REG TCG_REG_R0
41 #endif
43 static const int tcg_target_reg_alloc_order[] = {
44 TCG_REG_R4,
45 TCG_REG_R5,
46 TCG_REG_R6,
47 TCG_REG_R7,
48 TCG_REG_R8,
49 TCG_REG_R9,
50 TCG_REG_R10,
51 TCG_REG_R11,
52 TCG_REG_R12,
53 TCG_REG_R13,
55 TCG_REG_R17,
56 TCG_REG_R14,
57 TCG_REG_R15,
58 TCG_REG_R16,
60 TCG_REG_R26,
61 TCG_REG_R25,
62 TCG_REG_R24,
63 TCG_REG_R23,
65 TCG_REG_RET0,
66 TCG_REG_RET1,
69 static const int tcg_target_call_iarg_regs[4] = {
70 TCG_REG_R26,
71 TCG_REG_R25,
72 TCG_REG_R24,
73 TCG_REG_R23,
76 static const int tcg_target_call_oarg_regs[2] = {
77 TCG_REG_RET0,
78 TCG_REG_RET1,
81 /* True iff val fits a signed field of width BITS. */
82 static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
84 return (val << ((sizeof(tcg_target_long) * 8 - bits))
85 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
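/* For example, with a 32-bit tcg_target_long, check_fit_tl(val, 11)
   accepts the signed range -1024..1023: check_fit_tl(1023, 11) and
   check_fit_tl(-1024, 11) hold, while check_fit_tl(1024, 11) does not.  */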
88 /* True iff depi can be used to compute (reg | MASK).
89 Accept a bit pattern like:
90 0....01....1
91 1....10....0
92 0..01..10..0
93 Copied from gcc sources. */
94 static inline int or_mask_p(tcg_target_ulong mask)
96 if (mask == 0 || mask == -1) {
97 return 0;
99 mask += mask & -mask;
100 return (mask & (mask - 1)) == 0;
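/* Worked example of the trick above: m = 0x00ff0000 is a single run, so
   m += (m & -m) gives 0x01000000, a power of two, and the test passes.
   m = 0x00ff00ff has two runs; the same step gives 0x00ff0100, not a
   power of two, so the test fails.  A wrap-around pattern such as
   0xfff00000 overflows to zero and is likewise accepted.  */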
103 /* True iff depi or extru can be used to compute (reg & mask).
104 Accept a bit pattern like these:
105 0....01....1
106 1....10....0
107 1..10..01..1
108 Copied from gcc sources. */
109 static inline int and_mask_p(tcg_target_ulong mask)
111 return or_mask_p(~mask);
114 static int low_sign_ext(int val, int len)
116 return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
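/* This implements the PA-RISC "low sign extension" immediate encoding used
   by the im5/im11/im14 fields: the sign bit is stored in the least
   significant bit of the field, with the magnitude above it.  For example,
   low_sign_ext(5, 11) == 0x00a and low_sign_ext(-1, 11) == 0x7ff.  */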
119 static int reassemble_12(int as12)
121 return (((as12 & 0x800) >> 11) |
122 ((as12 & 0x400) >> 8) |
123 ((as12 & 0x3ff) << 3));
126 static int reassemble_17(int as17)
128 return (((as17 & 0x10000) >> 16) |
129 ((as17 & 0x0f800) << 5) |
130 ((as17 & 0x00400) >> 8) |
131 ((as17 & 0x003ff) << 3));
134 static int reassemble_21(int as21)
136 return (((as21 & 0x100000) >> 20) |
137 ((as21 & 0x0ffe00) >> 8) |
138 ((as21 & 0x000180) << 7) |
139 ((as21 & 0x00007c) << 14) |
140 ((as21 & 0x000003) << 12));
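/* The reassemble_* helpers scatter a contiguous displacement into the
   permuted bit positions used by the PA-RISC branch (12- and 17-bit) and
   LDIL/ADDIL (21-bit) instruction formats.  For instance, reassemble_12
   places the low ten bits of the displacement at bits 3..12 of the result,
   bit 10 at bit 2, and the sign bit at bit 0.  */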
143 /* ??? Bizarrely, there is no PCREL12F relocation type. I guess all
144 such relocations are simply fully handled by the assembler. */
145 #define R_PARISC_PCREL12F R_PARISC_NONE
147 static void patch_reloc(uint8_t *code_ptr, int type,
148 tcg_target_long value, tcg_target_long addend)
150 uint32_t *insn_ptr = (uint32_t *)code_ptr;
151 uint32_t insn = *insn_ptr;
152 tcg_target_long pcrel;
154 value += addend;
155 pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
157 switch (type) {
158 case R_PARISC_PCREL12F:
159 assert(check_fit_tl(pcrel, 12));
160 /* ??? We assume all patches are forward. See tcg_out_brcond
161 re setting the NUL bit on the branch and eliding the nop. */
162 assert(pcrel >= 0);
163 insn &= ~0x1ffdu;
164 insn |= reassemble_12(pcrel);
165 break;
166 case R_PARISC_PCREL17F:
167 assert(check_fit_tl(pcrel, 17));
168 insn &= ~0x1f1ffdu;
169 insn |= reassemble_17(pcrel);
170 break;
171 default:
172 tcg_abort();
175 *insn_ptr = insn;
178 /* maximum number of registers used for input function arguments */
179 static inline int tcg_target_get_call_iarg_regs_count(int flags)
181 return 4;
184 /* parse target specific constraints */
185 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
187 const char *ct_str;
189 ct_str = *pct_str;
190 switch (ct_str[0]) {
191 case 'r':
192 ct->ct |= TCG_CT_REG;
193 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
194 break;
195 case 'L': /* qemu_ld/st constraint */
196 ct->ct |= TCG_CT_REG;
197 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
202 break;
203 case 'Z':
204 ct->ct |= TCG_CT_CONST_0;
205 break;
206 case 'I':
207 ct->ct |= TCG_CT_CONST_S11;
208 break;
209 case 'J':
210 ct->ct |= TCG_CT_CONST_S5;
211 break;
212 case 'K':
213 ct->ct |= TCG_CT_CONST_MS11;
214 break;
215 case 'M':
216 ct->ct |= TCG_CT_CONST_AND;
217 break;
218 case 'O':
219 ct->ct |= TCG_CT_CONST_OR;
220 break;
221 default:
222 return -1;
224 ct_str++;
225 *pct_str = ct_str;
226 return 0;
229 /* test if a constant matches the constraint */
230 static int tcg_target_const_match(tcg_target_long val,
231 const TCGArgConstraint *arg_ct)
233 int ct = arg_ct->ct;
234 if (ct & TCG_CT_CONST) {
235 return 1;
236 } else if (ct & TCG_CT_CONST_0) {
237 return val == 0;
238 } else if (ct & TCG_CT_CONST_S5) {
239 return check_fit_tl(val, 5);
240 } else if (ct & TCG_CT_CONST_S11) {
241 return check_fit_tl(val, 11);
242 } else if (ct & TCG_CT_CONST_MS11) {
243 return check_fit_tl(-val, 11);
244 } else if (ct & TCG_CT_CONST_AND) {
245 return and_mask_p(val);
246 } else if (ct & TCG_CT_CONST_OR) {
247 return or_mask_p(val);
249 return 0;
252 #define INSN_OP(x) ((x) << 26)
253 #define INSN_EXT3BR(x) ((x) << 13)
254 #define INSN_EXT3SH(x) ((x) << 10)
255 #define INSN_EXT4(x) ((x) << 6)
256 #define INSN_EXT5(x) (x)
257 #define INSN_EXT6(x) ((x) << 6)
258 #define INSN_EXT7(x) ((x) << 6)
259 #define INSN_EXT8A(x) ((x) << 6)
260 #define INSN_EXT8B(x) ((x) << 5)
261 #define INSN_T(x) (x)
262 #define INSN_R1(x) ((x) << 16)
263 #define INSN_R2(x) ((x) << 21)
264 #define INSN_DEP_LEN(x) (32 - (x))
265 #define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
266 #define INSN_SHDEP_P(x) ((x) << 5)
267 #define INSN_COND(x) ((x) << 13)
268 #define INSN_IM11(x) low_sign_ext(x, 11)
269 #define INSN_IM14(x) low_sign_ext(x, 14)
270 #define INSN_IM5(x) (low_sign_ext(x, 5) << 16)
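/* A complete instruction word is formed by OR-ing one of the INSN_* opcode
   patterns below with the field macros above; for example, tcg_out_arith()
   emits op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2).  */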
272 #define COND_NEVER 0
273 #define COND_EQ 1
274 #define COND_LT 2
275 #define COND_LE 3
276 #define COND_LTU 4
277 #define COND_LEU 5
278 #define COND_SV 6
279 #define COND_OD 7
280 #define COND_FALSE 8
282 #define INSN_ADD (INSN_OP(0x02) | INSN_EXT6(0x18))
283 #define INSN_ADDC (INSN_OP(0x02) | INSN_EXT6(0x1c))
284 #define INSN_ADDI (INSN_OP(0x2d))
285 #define INSN_ADDIL (INSN_OP(0x0a))
286 #define INSN_ADDL (INSN_OP(0x02) | INSN_EXT6(0x28))
287 #define INSN_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
288 #define INSN_ANDCM (INSN_OP(0x02) | INSN_EXT6(0x00))
289 #define INSN_COMCLR (INSN_OP(0x02) | INSN_EXT6(0x22))
290 #define INSN_COMICLR (INSN_OP(0x24))
291 #define INSN_DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
292 #define INSN_DEPI (INSN_OP(0x35) | INSN_EXT3SH(7))
293 #define INSN_EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
294 #define INSN_EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
295 #define INSN_LDIL (INSN_OP(0x08))
296 #define INSN_LDO (INSN_OP(0x0d))
297 #define INSN_MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
298 #define INSN_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
299 #define INSN_SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
300 #define INSN_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
301 #define INSN_SUBB (INSN_OP(0x02) | INSN_EXT6(0x14))
302 #define INSN_SUBI (INSN_OP(0x25))
303 #define INSN_VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
304 #define INSN_VEXTRU (INSN_OP(0x34) | INSN_EXT3SH(4))
305 #define INSN_VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
306 #define INSN_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
307 #define INSN_ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
308 #define INSN_ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
310 #define INSN_BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
311 #define INSN_BL_N (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
312 #define INSN_BLR (INSN_OP(0x3a) | INSN_EXT3BR(2))
313 #define INSN_BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
314 #define INSN_BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
315 #define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))
317 #define INSN_LDB (INSN_OP(0x10))
318 #define INSN_LDH (INSN_OP(0x11))
319 #define INSN_LDW (INSN_OP(0x12))
320 #define INSN_LDWM (INSN_OP(0x13))
321 #define INSN_FLDDS (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))
323 #define INSN_LDBX (INSN_OP(0x03) | INSN_EXT4(0))
324 #define INSN_LDHX (INSN_OP(0x03) | INSN_EXT4(1))
325 #define INSN_LDWX (INSN_OP(0x03) | INSN_EXT4(2))
327 #define INSN_STB (INSN_OP(0x18))
328 #define INSN_STH (INSN_OP(0x19))
329 #define INSN_STW (INSN_OP(0x1a))
330 #define INSN_STWM (INSN_OP(0x1b))
331 #define INSN_FSTDS (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))
333 #define INSN_COMBT (INSN_OP(0x20))
334 #define INSN_COMBF (INSN_OP(0x22))
335 #define INSN_COMIBT (INSN_OP(0x21))
336 #define INSN_COMIBF (INSN_OP(0x23))
338 /* supplied by libgcc */
339 extern void *__canonicalize_funcptr_for_compare(void *);
341 static void tcg_out_mov(TCGContext *s, int ret, int arg)
343 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
344 but hppa-dis.c is unaware of this definition */
345 if (ret != arg) {
346 tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
347 | INSN_R2(TCG_REG_R0));
351 static void tcg_out_movi(TCGContext *s, TCGType type,
352 int ret, tcg_target_long arg)
354 if (check_fit_tl(arg, 14)) {
355 tcg_out32(s, INSN_LDO | INSN_R1(ret)
356 | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
357 } else {
358 uint32_t hi, lo;
359 hi = arg >> 11;
360 lo = arg & 0x7ff;
362 tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
363 if (lo) {
364 tcg_out32(s, INSN_LDO | INSN_R1(ret)
365 | INSN_R2(ret) | INSN_IM14(lo));
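/* Example of the split above: arg = 0x12345678 does not fit in 14 bits, so
   hi = 0x2468a and lo = 0x678; the LDIL deposits hi into the top 21 bits
   (0x12345000) and the LDO adds lo.  Splitting at bit 11 keeps lo
   non-negative, so the sign-extending 14-bit LDO immediate is safe.  */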
370 static void tcg_out_ldst(TCGContext *s, int ret, int addr,
371 tcg_target_long offset, int op)
373 if (!check_fit_tl(offset, 14)) {
374 uint32_t hi, lo, op;
376 hi = offset >> 11;
377 lo = offset & 0x7ff;
379 if (addr == TCG_REG_R0) {
380 op = INSN_LDIL | INSN_R2(TCG_REG_R1);
381 } else {
382 op = INSN_ADDIL | INSN_R2(addr);
384 tcg_out32(s, op | reassemble_21(hi));
386 addr = TCG_REG_R1;
387 offset = lo;
390 if (ret != addr || offset != 0 || op != INSN_LDO) {
391 tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
395 /* This function is required by tcg.c. */
396 static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
397 int arg1, tcg_target_long arg2)
399 tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
402 /* This function is required by tcg.c. */
403 static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
404 int arg1, tcg_target_long arg2)
406 tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
409 static void tcg_out_ldst_index(TCGContext *s, int data,
410 int base, int index, int op)
412 tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
415 static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
416 tcg_target_long val)
418 tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
421 /* This function is required by tcg.c. */
422 static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
424 tcg_out_addi2(s, reg, reg, val);
427 static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
429 tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
432 static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
433 tcg_target_long val, int op)
435 assert(check_fit_tl(val, 11));
436 tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
439 static inline void tcg_out_nop(TCGContext *s)
441 tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
444 static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
446 tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
449 /* Extract LEN bits at position OFS from ARG and place in RET.
450 Note that here the bit ordering is reversed from the PA-RISC
451 standard, such that the right-most bit is 0. */
452 static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
453 unsigned ofs, unsigned len, int sign)
455 assert(ofs < 32 && len <= 32 - ofs);
456 tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
457 | INSN_R1(ret) | INSN_R2(arg)
458 | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
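/* For example, tcg_out_extr(s, ret, arg, 0, 8, 1) extracts the low byte
   sign-extended (this is how tcg_out_ext8s below is implemented); the
   little-endian OFS is converted to a PA-RISC bit position via 31 - ofs.  */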
461 /* Likewise with OFS interpreted little-endian. */
462 static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
463 unsigned ofs, unsigned len)
465 assert(ofs < 32 && len <= 32 - ofs);
466 tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
467 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
470 static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
471 unsigned count)
473 assert(count < 32);
474 tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
475 | INSN_SHDEP_CP(count));
478 static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
480 tcg_out_mtctl_sar(s, creg);
481 tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
484 static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
486 int bs0, bs1;
488 /* Note that the argument is constrained to match or_mask_p. */
489 for (bs0 = 0; bs0 < 32; bs0++) {
490 if ((m & (1u << bs0)) != 0) {
491 break;
494 for (bs1 = bs0; bs1 < 32; bs1++) {
495 if ((m & (1u << bs1)) == 0) {
496 break;
499 assert(bs1 == 32 || (1ul << bs1) > m);
501 tcg_out_mov(s, ret, arg);
502 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
503 | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
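/* For example, m = 0x00ff0000 yields bs0 = 16 and bs1 = 24, so after the
   copy a single DEPI of -1 with length 8 at bit 16 sets exactly those
   eight bits, which is equivalent to OR-ing in the mask.  */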
506 static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
508 int ls0, ls1, ms0;
510 /* Note that the argument is constrained to match and_mask_p. */
511 for (ls0 = 0; ls0 < 32; ls0++) {
512 if ((m & (1u << ls0)) == 0) {
513 break;
516 for (ls1 = ls0; ls1 < 32; ls1++) {
517 if ((m & (1u << ls1)) != 0) {
518 break;
521 for (ms0 = ls1; ms0 < 32; ms0++) {
522 if ((m & (1u << ms0)) == 0) {
523 break;
526 assert (ms0 == 32);
528 if (ls1 == 32) {
529 tcg_out_extr(s, ret, arg, 0, ls0, 0);
530 } else {
531 tcg_out_mov(s, ret, arg);
532 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
533 | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
537 static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
539 tcg_out_extr(s, ret, arg, 0, 8, 1);
542 static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
544 tcg_out_extr(s, ret, arg, 0, 16, 1);
547 static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
549 count &= 31;
550 tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
551 | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
554 static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
556 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
557 tcg_out_mtctl_sar(s, TCG_REG_R20);
558 tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
561 static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
563 count &= 31;
564 tcg_out_extr(s, ret, arg, count, 32 - count, 0);
567 static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
569 tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
572 static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
574 count &= 31;
575 tcg_out_extr(s, ret, arg, count, 32 - count, 1);
578 static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
580 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
581 tcg_out_mtctl_sar(s, TCG_REG_R20);
582 tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
585 static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
587 count &= 31;
588 tcg_out_shd(s, ret, arg, arg, 32 - count);
591 static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
593 tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
594 tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
597 static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
599 count &= 31;
600 tcg_out_shd(s, ret, arg, arg, count);
603 static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
605 tcg_out_vshd(s, ret, arg, arg, creg);
608 static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
610 if (ret != arg) {
611 tcg_out_mov(s, ret, arg); /* arg = xxAB */
613 tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
614 tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
617 static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
619 /* arg = ABCD */
620 tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
621 tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
622 tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
625 static void tcg_out_call(TCGContext *s, void *func)
627 tcg_target_long val, hi, lo, disp;
629 val = (uint32_t)__canonicalize_funcptr_for_compare(func);
630 disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
632 if (check_fit_tl(disp, 17)) {
633 tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
634 } else {
635 hi = val >> 11;
636 lo = val & 0x7ff;
638 tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
639 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
640 | reassemble_17(lo >> 2));
641 tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
645 static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
646 int arg1, int arg2)
648 /* Store both words into the stack for copy to the FPU. */
649 tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
650 tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);
652 /* Load both words into the FPU at the same time. We get away
653 with this because we can address the left and right half of the
654 FPU registers individually once loaded. */
655 /* fldds stack_temp(sp),fr22 */
656 tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
657 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
659 /* xmpyu fr22r,fr22,fr22 */
660 tcg_out32(s, 0x3ad64796);
662 /* Store the 64-bit result back into the stack. */
663 /* fstds fr22,stack_temp(sp) */
664 tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
665 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
667 /* Load the pieces of the result that the caller requested. */
668 if (reth) {
669 tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
671 if (retl) {
672 tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
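/* PA-RISC has no multiply instruction operating on the general registers,
   so the 32x32->64 unsigned multiply is done with the FPU's XMPYU,
   bouncing the operands and the result through the stack temp slot.  */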
676 static void tcg_out_add2(TCGContext *s, int destl, int desth,
677 int al, int ah, int bl, int bh, int blconst)
679 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
681 if (blconst) {
682 tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
683 } else {
684 tcg_out_arith(s, tmp, al, bl, INSN_ADD);
686 tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
688 tcg_out_mov(s, destl, tmp);
691 static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
692 int bl, int bh, int alconst, int blconst)
694 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
696 if (alconst) {
697 if (blconst) {
698 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
699 bl = TCG_REG_R20;
701 tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
702 } else if (blconst) {
703 tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
704 } else {
705 tcg_out_arith(s, tmp, al, bl, INSN_SUB);
707 tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
709 tcg_out_mov(s, destl, tmp);
712 static void tcg_out_branch(TCGContext *s, int label_index, int nul)
714 TCGLabel *l = &s->labels[label_index];
715 uint32_t op = nul ? INSN_BL_N : INSN_BL;
717 if (l->has_value) {
718 tcg_target_long val = l->u.value;
720 val -= (tcg_target_long)s->code_ptr + 8;
721 val >>= 2;
722 assert(check_fit_tl(val, 17));
724 tcg_out32(s, op | reassemble_17(val));
725 } else {
726 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
727 tcg_out32(s, op);
731 static const uint8_t tcg_cond_to_cmp_cond[10] =
733 [TCG_COND_EQ] = COND_EQ,
734 [TCG_COND_NE] = COND_EQ | COND_FALSE,
735 [TCG_COND_LT] = COND_LT,
736 [TCG_COND_GE] = COND_LT | COND_FALSE,
737 [TCG_COND_LE] = COND_LE,
738 [TCG_COND_GT] = COND_LE | COND_FALSE,
739 [TCG_COND_LTU] = COND_LTU,
740 [TCG_COND_GEU] = COND_LTU | COND_FALSE,
741 [TCG_COND_LEU] = COND_LEU,
742 [TCG_COND_GTU] = COND_LEU | COND_FALSE,
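/* Only eight condition codes fit the 3-bit condition field; the entries
   tagged with COND_FALSE instead select the "on false" form of the
   comparison (COMBF/COMIBF, or the negation bit in COMCLR/COMICLR), with
   only the low three bits going into the condition field.  */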
745 static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
746 TCGArg c2, int c2const, int label_index)
748 TCGLabel *l = &s->labels[label_index];
749 int op, pacond;
751 /* Note that COMIB operates as if the immediate is the first
752 operand. We model brcond with the immediate in the second
753 to better match what targets are likely to give us. For
754 consistency, model COMB with reversed operands as well. */
755 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
757 if (c2const) {
758 op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
759 op |= INSN_IM5(c2);
760 } else {
761 op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
762 op |= INSN_R1(c2);
764 op |= INSN_R2(c1);
765 op |= INSN_COND(pacond & 7);
767 if (l->has_value) {
768 tcg_target_long val = l->u.value;
770 val -= (tcg_target_long)s->code_ptr + 8;
771 val >>= 2;
772 assert(check_fit_tl(val, 12));
774 /* ??? Assume that all branches to defined labels are backward.
775 Which means that if the nul bit is set, the delay slot is
776 executed if the branch is taken, and not executed in fallthru. */
777 tcg_out32(s, op | reassemble_12(val));
778 tcg_out_nop(s);
779 } else {
780 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
781 /* ??? Assume that all branches to undefined labels are forward.
782 Which means that if the nul bit is set, the delay slot is
783 not executed if the branch is taken, which is what we want. */
784 tcg_out32(s, op | 2);
788 static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
789 TCGArg c1, TCGArg c2, int c2const)
791 int op, pacond;
793 /* Note that COMICLR operates as if the immediate is the first
794 operand. We model setcond with the immediate in the second
795 to better match what targets are likely to give us. For
796 consistency, model COMCLR with reversed operands as well. */
797 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
799 if (c2const) {
800 op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
801 } else {
802 op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
804 op |= INSN_COND(pacond & 7);
805 op |= pacond & COND_FALSE ? 1 << 12 : 0;
807 tcg_out32(s, op);
810 static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
811 TCGArg bl, int blconst, TCGArg bh, int bhconst,
812 int label_index)
814 switch (cond) {
815 case TCG_COND_EQ:
816 case TCG_COND_NE:
817 tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
818 tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
819 break;
821 default:
822 tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
823 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
824 tcg_out_brcond(s, tcg_unsigned_cond(cond),
825 al, bl, blconst, label_index);
826 break;
830 static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
831 TCGArg c1, TCGArg c2, int c2const)
833 tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
834 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
837 static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
838 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
839 TCGArg bh, int bhconst)
841 int scratch = TCG_REG_R20;
843 if (ret != al && ret != ah
844 && (blconst || ret != bl)
845 && (bhconst || ret != bh)) {
846 scratch = ret;
849 switch (cond) {
850 case TCG_COND_EQ:
851 case TCG_COND_NE:
852 tcg_out_setcond(s, cond, scratch, al, bl, blconst);
853 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
854 tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
855 break;
857 default:
858 tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
859 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
860 tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
861 tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
862 tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
863 break;
866 tcg_out_mov(s, ret, scratch);
869 #if defined(CONFIG_SOFTMMU)
870 #include "../../softmmu_defs.h"
872 static void *qemu_ld_helpers[4] = {
873 __ldb_mmu,
874 __ldw_mmu,
875 __ldl_mmu,
876 __ldq_mmu,
879 static void *qemu_st_helpers[4] = {
880 __stb_mmu,
881 __stw_mmu,
882 __stl_mmu,
883 __stq_mmu,
886 /* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
887 the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
888 TLB for the memory index. The return value is the offset from ENV
889 contained in R1 afterward (to be used when loading ADDEND); if the
890 return value is 0, R1 is not used. */
892 static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
893 int addrhi, int s_bits, int lab_miss, int offset)
895 int ret;
897 /* Extracting the index into the TLB. The "normal C operation" is
898 r1 = addr_reg >> TARGET_PAGE_BITS;
899 r1 &= CPU_TLB_SIZE - 1;
900 r1 <<= CPU_TLB_ENTRY_BITS;
901 What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
902 and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
903 operations with an EXTRU. Unfortunately, the current value of
904 CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
905 add that follows. */
906 tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
907 tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
908 tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
909 tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
911 /* Make sure that both the addr_{read,write} and addend can be
912 read with a 14-bit offset from the same base register. */
913 if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
914 ret = 0;
915 } else {
916 ret = (offset + 0x400) & ~0x7ff;
917 offset -= ret;   /* displacement is now relative to R1 = r1 + ret */
918 tcg_out_addi2(s, TCG_REG_R1, r1, ret);
919 r1 = TCG_REG_R1;
922 /* Load the entry from the computed slot. */
923 if (TARGET_LONG_BITS == 64) {
924 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
925 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
926 } else {
927 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
930 /* If not equal, jump to lab_miss. */
931 if (TARGET_LONG_BITS == 64) {
932 tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
933 r0, 0, addrhi, 0, lab_miss);
934 } else {
935 tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
938 return ret;
940 #endif
942 static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
943 int addr_reg, int addend_reg, int opc)
945 #ifdef TARGET_WORDS_BIGENDIAN
946 const int bswap = 0;
947 #else
948 const int bswap = 1;
949 #endif
951 switch (opc) {
952 case 0:
953 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
954 break;
955 case 0 | 4:
956 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
957 tcg_out_ext8s(s, datalo_reg, datalo_reg);
958 break;
959 case 1:
960 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
961 if (bswap) {
962 tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
964 break;
965 case 1 | 4:
966 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
967 if (bswap) {
968 tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
969 } else {
970 tcg_out_ext16s(s, datalo_reg, datalo_reg);
972 break;
973 case 2:
974 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
975 if (bswap) {
976 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
978 break;
979 case 3:
980 if (bswap) {
981 int t = datahi_reg;
982 datahi_reg = datalo_reg;
983 datalo_reg = t;
985 /* We can't access the low-part with a reg+reg addressing mode,
986 so perform the addition now and use reg_ofs addressing mode. */
987 if (addend_reg != TCG_REG_R0) {
988 tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
989 addr_reg = TCG_REG_R20;
991 /* Make sure not to clobber the base register. */
992 if (datahi_reg == addr_reg) {
993 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
994 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
995 } else {
996 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
997 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
999 if (bswap) {
1000 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
1001 tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
1003 break;
1004 default:
1005 tcg_abort();
1009 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
1011 int datalo_reg = *args++;
1012 /* Note that datahi_reg is only used for 64-bit loads. */
1013 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1014 int addrlo_reg = *args++;
1016 #if defined(CONFIG_SOFTMMU)
1017 /* Note that addrhi_reg is only used for 64-bit guests. */
1018 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1019 int mem_index = *args;
1020 int lab1, lab2, argreg, offset;
1022 lab1 = gen_new_label();
1023 lab2 = gen_new_label();
1025 offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
1026 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
1027 opc & 3, lab1, offset);
1029 /* TLB Hit. */
1030 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
1031 offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
1032 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
1033 tcg_out_branch(s, lab2, 1);
1035 /* TLB Miss. */
1036 /* label1: */
1037 tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);
1039 argreg = TCG_REG_R26;
1040 tcg_out_mov(s, argreg--, addrlo_reg);
1041 if (TARGET_LONG_BITS == 64) {
1042 tcg_out_mov(s, argreg--, addrhi_reg);
1044 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1046 tcg_out_call(s, qemu_ld_helpers[opc & 3]);
1048 switch (opc) {
1049 case 0:
1050 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
1051 break;
1052 case 0 | 4:
1053 tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
1054 break;
1055 case 1:
1056 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
1057 break;
1058 case 1 | 4:
1059 tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
1060 break;
1061 case 2:
1062 case 2 | 4:
1063 tcg_out_mov(s, datalo_reg, TCG_REG_RET0);
1064 break;
1065 case 3:
1066 tcg_out_mov(s, datahi_reg, TCG_REG_RET0);
1067 tcg_out_mov(s, datalo_reg, TCG_REG_RET1);
1068 break;
1069 default:
1070 tcg_abort();
1073 /* label2: */
1074 tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
1075 #else
1076 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
1077 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
1078 #endif
1081 static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg, int datahi_reg,
1082 int addr_reg, int opc)
1084 #ifdef TARGET_WORDS_BIGENDIAN
1085 const int bswap = 0;
1086 #else
1087 const int bswap = 1;
1088 #endif
1090 switch (opc) {
1091 case 0:
1092 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
1093 break;
1094 case 1:
1095 if (bswap) {
1096 tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
1097 datalo_reg = TCG_REG_R20;
1099 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
1100 break;
1101 case 2:
1102 if (bswap) {
1103 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1104 datalo_reg = TCG_REG_R20;
1106 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
1107 break;
1108 case 3:
1109 if (bswap) {
1110 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1111 tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
1112 datahi_reg = TCG_REG_R20;
1113 datalo_reg = TCG_REG_R23;
1115 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
1116 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
1117 break;
1118 default:
1119 tcg_abort();
1124 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1126 int datalo_reg = *args++;
1127 /* Note that datahi_reg is only used for 64-bit stores. */
1128 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1129 int addrlo_reg = *args++;
1131 #if defined(CONFIG_SOFTMMU)
1132 /* Note that addrhi_reg is only used for 64-bit guests. */
1133 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1134 int mem_index = *args;
1135 int lab1, lab2, argreg, offset;
1137 lab1 = gen_new_label();
1138 lab2 = gen_new_label();
1140 offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
1141 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
1142 opc, lab1, offset);
1144 /* TLB Hit. */
1145 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
1146 offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
1148 /* There are no indexed stores, so we must do this addition explicitly.
1149 Careful to avoid R20, which is used for the bswaps to follow. */
1150 tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
1151 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
1152 tcg_out_branch(s, lab2, 1);
1154 /* TLB Miss. */
1155 /* label1: */
1156 tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);
1158 argreg = TCG_REG_R26;
1159 tcg_out_mov(s, argreg--, addrlo_reg);
1160 if (TARGET_LONG_BITS == 64) {
1161 tcg_out_mov(s, argreg--, addrhi_reg);
1164 switch(opc) {
1165 case 0:
1166 tcg_out_andi(s, argreg--, datalo_reg, 0xff);
1167 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1168 break;
1169 case 1:
1170 tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
1171 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1172 break;
1173 case 2:
1174 tcg_out_mov(s, argreg--, datalo_reg);
1175 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1176 break;
1177 case 3:
1178 /* Because of the alignment required by the 64-bit data argument,
1179 we will always use R23/R24. Also, we will always run out of
1180 argument registers for storing mem_index, so that will have
1181 to go on the stack. */
1182 if (mem_index == 0) {
1183 argreg = TCG_REG_R0;
1184 } else {
1185 argreg = TCG_REG_R20;
1186 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1188 tcg_out_mov(s, TCG_REG_R23, datahi_reg);
1189 tcg_out_mov(s, TCG_REG_R24, datalo_reg);
1190 tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
1191 TCG_TARGET_CALL_STACK_OFFSET - 4);
1192 break;
1193 default:
1194 tcg_abort();
1197 tcg_out_call(s, qemu_st_helpers[opc]);
1199 /* label2: */
1200 tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
1201 #else
1202 /* There are no indexed stores, so if GUEST_BASE is set we must do the add
1203 explicitly. Careful to avoid R20, which is used for the bswaps to follow. */
1204 if (GUEST_BASE != 0) {
1205 tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
1206 addrlo_reg = TCG_REG_R31;
1208 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
1209 #endif
1212 static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
1214 if (!check_fit_tl(arg, 14)) {
1215 uint32_t hi, lo;
1216 hi = arg & ~0x7ff;
1217 lo = arg & 0x7ff;
1218 if (lo) {
1219 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
1220 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1221 tcg_out_addi(s, TCG_REG_RET0, lo);
1222 return;
1224 arg = hi;
1226 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1227 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
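/* In both paths above, the instruction that completes the return value in
   RET0 is emitted after the BV: BV without the ,n completer executes that
   following instruction in its branch delay slot, so RET0 is fully set by
   the time control reaches the epilogue through R18.  */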
1230 static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
1232 if (s->tb_jmp_offset) {
1233 /* direct jump method */
1234 fprintf(stderr, "goto_tb direct\n");
1235 tcg_abort();
1236 } else {
1237 /* indirect jump method */
1238 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
1239 (tcg_target_long)(s->tb_next + arg));
1240 tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
1242 s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
1245 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
1246 const int *const_args)
1248 switch (opc) {
1249 case INDEX_op_exit_tb:
1250 tcg_out_exit_tb(s, args[0]);
1251 break;
1252 case INDEX_op_goto_tb:
1253 tcg_out_goto_tb(s, args[0]);
1254 break;
1256 case INDEX_op_call:
1257 if (const_args[0]) {
1258 tcg_out_call(s, (void *)args[0]);
1259 } else {
1260 /* ??? FIXME: the value in the register in args[0] is almost
1261 certainly a procedure descriptor, not a code address. We
1262 probably need to use the millicode $$dyncall routine. */
1263 tcg_abort();
1265 break;
1267 case INDEX_op_jmp:
1268 fprintf(stderr, "unimplemented jmp\n");
1269 tcg_abort();
1270 break;
1272 case INDEX_op_br:
1273 tcg_out_branch(s, args[0], 1);
1274 break;
1276 case INDEX_op_movi_i32:
1277 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1278 break;
1280 case INDEX_op_ld8u_i32:
1281 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
1282 break;
1283 case INDEX_op_ld8s_i32:
1284 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
1285 tcg_out_ext8s(s, args[0], args[0]);
1286 break;
1287 case INDEX_op_ld16u_i32:
1288 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
1289 break;
1290 case INDEX_op_ld16s_i32:
1291 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
1292 tcg_out_ext16s(s, args[0], args[0]);
1293 break;
1294 case INDEX_op_ld_i32:
1295 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
1296 break;
1298 case INDEX_op_st8_i32:
1299 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
1300 break;
1301 case INDEX_op_st16_i32:
1302 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
1303 break;
1304 case INDEX_op_st_i32:
1305 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
1306 break;
1308 case INDEX_op_add_i32:
1309 if (const_args[2]) {
1310 tcg_out_addi2(s, args[0], args[1], args[2]);
1311 } else {
1312 tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
1314 break;
1316 case INDEX_op_sub_i32:
1317 if (const_args[1]) {
1318 if (const_args[2]) {
1319 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
1320 } else {
1321 /* Recall that SUBI is a reversed subtract. */
1322 tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
1324 } else if (const_args[2]) {
1325 tcg_out_addi2(s, args[0], args[1], -args[2]);
1326 } else {
1327 tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
1329 break;
1331 case INDEX_op_and_i32:
1332 if (const_args[2]) {
1333 tcg_out_andi(s, args[0], args[1], args[2]);
1334 } else {
1335 tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
1337 break;
1339 case INDEX_op_or_i32:
1340 if (const_args[2]) {
1341 tcg_out_ori(s, args[0], args[1], args[2]);
1342 } else {
1343 tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
1345 break;
1347 case INDEX_op_xor_i32:
1348 tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
1349 break;
1351 case INDEX_op_andc_i32:
1352 if (const_args[2]) {
1353 tcg_out_andi(s, args[0], args[1], ~args[2]);
1354 } else {
1355 tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
1357 break;
1359 case INDEX_op_shl_i32:
1360 if (const_args[2]) {
1361 tcg_out_shli(s, args[0], args[1], args[2]);
1362 } else {
1363 tcg_out_shl(s, args[0], args[1], args[2]);
1365 break;
1367 case INDEX_op_shr_i32:
1368 if (const_args[2]) {
1369 tcg_out_shri(s, args[0], args[1], args[2]);
1370 } else {
1371 tcg_out_shr(s, args[0], args[1], args[2]);
1373 break;
1375 case INDEX_op_sar_i32:
1376 if (const_args[2]) {
1377 tcg_out_sari(s, args[0], args[1], args[2]);
1378 } else {
1379 tcg_out_sar(s, args[0], args[1], args[2]);
1381 break;
1383 case INDEX_op_rotl_i32:
1384 if (const_args[2]) {
1385 tcg_out_rotli(s, args[0], args[1], args[2]);
1386 } else {
1387 tcg_out_rotl(s, args[0], args[1], args[2]);
1389 break;
1391 case INDEX_op_rotr_i32:
1392 if (const_args[2]) {
1393 tcg_out_rotri(s, args[0], args[1], args[2]);
1394 } else {
1395 tcg_out_rotr(s, args[0], args[1], args[2]);
1397 break;
1399 case INDEX_op_mul_i32:
1400 tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
1401 break;
1402 case INDEX_op_mulu2_i32:
1403 tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
1404 break;
1406 case INDEX_op_bswap16_i32:
1407 tcg_out_bswap16(s, args[0], args[1], 0);
1408 break;
1409 case INDEX_op_bswap32_i32:
1410 tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
1411 break;
1413 case INDEX_op_not_i32:
1414 tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
1415 break;
1416 case INDEX_op_ext8s_i32:
1417 tcg_out_ext8s(s, args[0], args[1]);
1418 break;
1419 case INDEX_op_ext16s_i32:
1420 tcg_out_ext16s(s, args[0], args[1]);
1421 break;
1423 /* These three correspond exactly to the fallback implementation.
1424 But by including them we reduce the number of TCG ops that
1425 need to be generated, and these opcodes are fairly common. */
1426 case INDEX_op_neg_i32:
1427 tcg_out_arith(s, args[0], TCG_REG_R0, args[1], INSN_SUB);
1428 break;
1429 case INDEX_op_ext8u_i32:
1430 tcg_out_andi(s, args[0], args[1], 0xff);
1431 break;
1432 case INDEX_op_ext16u_i32:
1433 tcg_out_andi(s, args[0], args[1], 0xffff);
1434 break;
1436 case INDEX_op_brcond_i32:
1437 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
1438 break;
1439 case INDEX_op_brcond2_i32:
1440 tcg_out_brcond2(s, args[4], args[0], args[1],
1441 args[2], const_args[2],
1442 args[3], const_args[3], args[5]);
1443 break;
1445 case INDEX_op_setcond_i32:
1446 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1447 break;
1448 case INDEX_op_setcond2_i32:
1449 tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
1450 args[3], const_args[3], args[4], const_args[4]);
1451 break;
1453 case INDEX_op_add2_i32:
1454 tcg_out_add2(s, args[0], args[1], args[2], args[3],
1455 args[4], args[5], const_args[4]);
1456 break;
1458 case INDEX_op_sub2_i32:
1459 tcg_out_sub2(s, args[0], args[1], args[2], args[3],
1460 args[4], args[5], const_args[2], const_args[4]);
1461 break;
1463 case INDEX_op_qemu_ld8u:
1464 tcg_out_qemu_ld(s, args, 0);
1465 break;
1466 case INDEX_op_qemu_ld8s:
1467 tcg_out_qemu_ld(s, args, 0 | 4);
1468 break;
1469 case INDEX_op_qemu_ld16u:
1470 tcg_out_qemu_ld(s, args, 1);
1471 break;
1472 case INDEX_op_qemu_ld16s:
1473 tcg_out_qemu_ld(s, args, 1 | 4);
1474 break;
1475 case INDEX_op_qemu_ld32:
1476 tcg_out_qemu_ld(s, args, 2);
1477 break;
1478 case INDEX_op_qemu_ld64:
1479 tcg_out_qemu_ld(s, args, 3);
1480 break;
1482 case INDEX_op_qemu_st8:
1483 tcg_out_qemu_st(s, args, 0);
1484 break;
1485 case INDEX_op_qemu_st16:
1486 tcg_out_qemu_st(s, args, 1);
1487 break;
1488 case INDEX_op_qemu_st32:
1489 tcg_out_qemu_st(s, args, 2);
1490 break;
1491 case INDEX_op_qemu_st64:
1492 tcg_out_qemu_st(s, args, 3);
1493 break;
1495 default:
1496 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1497 tcg_abort();
1501 static const TCGTargetOpDef hppa_op_defs[] = {
1502 { INDEX_op_exit_tb, { } },
1503 { INDEX_op_goto_tb, { } },
1505 { INDEX_op_call, { "ri" } },
1506 { INDEX_op_jmp, { "r" } },
1507 { INDEX_op_br, { } },
1509 { INDEX_op_mov_i32, { "r", "r" } },
1510 { INDEX_op_movi_i32, { "r" } },
1512 { INDEX_op_ld8u_i32, { "r", "r" } },
1513 { INDEX_op_ld8s_i32, { "r", "r" } },
1514 { INDEX_op_ld16u_i32, { "r", "r" } },
1515 { INDEX_op_ld16s_i32, { "r", "r" } },
1516 { INDEX_op_ld_i32, { "r", "r" } },
1517 { INDEX_op_st8_i32, { "rZ", "r" } },
1518 { INDEX_op_st16_i32, { "rZ", "r" } },
1519 { INDEX_op_st_i32, { "rZ", "r" } },
1521 { INDEX_op_add_i32, { "r", "rZ", "ri" } },
1522 { INDEX_op_sub_i32, { "r", "rI", "ri" } },
1523 { INDEX_op_and_i32, { "r", "rZ", "rM" } },
1524 { INDEX_op_or_i32, { "r", "rZ", "rO" } },
1525 { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
1526 /* Note that the second argument will be inverted, which means
1527 we want a constant whose inversion matches M, and that O = ~M.
1528 See the implementation of and_mask_p. */
1529 { INDEX_op_andc_i32, { "r", "rZ", "rO" } },
1531 { INDEX_op_mul_i32, { "r", "r", "r" } },
1532 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1534 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1535 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1536 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1537 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1538 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
1540 { INDEX_op_bswap16_i32, { "r", "r" } },
1541 { INDEX_op_bswap32_i32, { "r", "r" } },
1542 { INDEX_op_neg_i32, { "r", "r" } },
1543 { INDEX_op_not_i32, { "r", "r" } },
1545 { INDEX_op_ext8s_i32, { "r", "r" } },
1546 { INDEX_op_ext8u_i32, { "r", "r" } },
1547 { INDEX_op_ext16s_i32, { "r", "r" } },
1548 { INDEX_op_ext16u_i32, { "r", "r" } },
1550 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1551 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1553 { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
1554 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },
1556 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
1557 { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },
1559 #if TARGET_LONG_BITS == 32
1560 { INDEX_op_qemu_ld8u, { "r", "L" } },
1561 { INDEX_op_qemu_ld8s, { "r", "L" } },
1562 { INDEX_op_qemu_ld16u, { "r", "L" } },
1563 { INDEX_op_qemu_ld16s, { "r", "L" } },
1564 { INDEX_op_qemu_ld32, { "r", "L" } },
1565 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1567 { INDEX_op_qemu_st8, { "LZ", "L" } },
1568 { INDEX_op_qemu_st16, { "LZ", "L" } },
1569 { INDEX_op_qemu_st32, { "LZ", "L" } },
1570 { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
1571 #else
1572 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1573 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1574 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1575 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
1576 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
1577 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
1579 { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
1580 { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
1581 { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
1582 { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
1583 #endif
1584 { -1 },
1587 static int tcg_target_callee_save_regs[] = {
1588 /* R2, the return address register, is saved specially
1589 in the caller's frame. */
1590 /* R3, the frame pointer, is not currently modified. */
1591 TCG_REG_R4,
1592 TCG_REG_R5,
1593 TCG_REG_R6,
1594 TCG_REG_R7,
1595 TCG_REG_R8,
1596 TCG_REG_R9,
1597 TCG_REG_R10,
1598 TCG_REG_R11,
1599 TCG_REG_R12,
1600 TCG_REG_R13,
1601 TCG_REG_R14,
1602 TCG_REG_R15,
1603 TCG_REG_R16,
1604 /* R17 is the global env, so no need to save. */
1605 TCG_REG_R18
1608 void tcg_target_qemu_prologue(TCGContext *s)
1610 int frame_size, i;
1612 /* Allocate space for the fixed frame marker. */
1613 frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
1614 frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;
1616 /* Allocate space for the saved registers. */
1617 frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
1619 /* Align the allocated space. */
1620 frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
1621 & -TCG_TARGET_STACK_ALIGN);
1623 /* The return address is stored in the caller's frame. */
1624 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);
1626 /* Allocate stack frame, saving the first register at the same time. */
1627 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
1628 TCG_REG_SP, frame_size, INSN_STWM);
1630 /* Save all callee saved registers. */
1631 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1632 tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
1633 TCG_REG_SP, -frame_size + i * 4);
1636 #ifdef CONFIG_USE_GUEST_BASE
1637 /* Note that GUEST_BASE can change after the prologue is generated.
1638 To combat that, load the value from the variable instead of
1639 embedding a constant here. */
1640 tcg_out_ld(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
1641 TCG_REG_R0, (tcg_target_long)&guest_base);
1642 #endif
1644 /* Jump to TB, and adjust R18 to be the return address. */
1645 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
1646 tcg_out_mov(s, TCG_REG_R18, TCG_REG_R31);
1648 /* Restore callee saved registers. */
1649 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
1650 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1651 tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
1652 TCG_REG_SP, -frame_size + i * 4);
1655 /* Deallocate stack frame and return. */
1656 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
1657 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
1658 TCG_REG_SP, -frame_size, INSN_LDWM);
1661 void tcg_target_init(TCGContext *s)
1663 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1665 tcg_regset_clear(tcg_target_call_clobber_regs);
1666 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
1667 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
1668 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
1669 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
1670 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
1671 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
1672 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
1673 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
1674 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);
1676 tcg_regset_clear(s->reserved_regs);
1677 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
1678 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
1679 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
1680 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
1681 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
1682 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
1683 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
1684 tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
1685 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
1686 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
1687 #ifdef CONFIG_USE_GUEST_BASE
1688 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1689 #endif
1691 tcg_add_target_add_op_defs(hppa_op_defs);