[qemu.git] tcg/hppa/tcg-target.h
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#define TCG_TARGET_HPPA 1

#if defined(_PA_RISC1_1)
#define TCG_TARGET_REG_BITS 32
#else
#error unsupported
#endif

#define TCG_TARGET_WORDS_BIGENDIAN

#define TCG_TARGET_NB_REGS 32
enum {
    TCG_REG_R0 = 0,
    TCG_REG_R1,
    TCG_REG_RP,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_DP,
    TCG_REG_RET0,
    TCG_REG_RET1,
    TCG_REG_SP,
    TCG_REG_R31,
};
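/* The symbolic names follow the PA-RISC calling convention: RP (%r2) is the
   return pointer, DP (%r27) the global data pointer, RET0/RET1 (%r28/%r29)
   the value return registers, SP (%r30) the stack pointer, and %r31 the
   link register used by ble.  */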
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN 16
#define TCG_TARGET_STACK_GROWSUP
/* optional instructions */
//#define TCG_TARGET_HAS_ext8s_i32
//#define TCG_TARGET_HAS_ext16s_i32
//#define TCG_TARGET_HAS_bswap16_i32
//#define TCG_TARGET_HAS_bswap32_i32
/* Note: must be synced with dyngen-exec.h */
#define TCG_AREG0 TCG_REG_R17
#define TCG_AREG1 TCG_REG_R14
#define TCG_AREG2 TCG_REG_R15
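/* Flush the data and instruction caches over [start, stop].  The loop steps
   in 32-byte increments, which assumes the cache line size is no smaller
   than 32 bytes.  */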
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    start &= ~31;
    while (start <= stop) {
        asm volatile ("fdc 0(%0)\n"
                      "sync\n"
                      "fic 0(%%sr4, %0)\n"
                      "sync\n"
                      : : "r"(start) : "memory");
        start += 32;
    }
}
/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);
/* Field selection types defined by hppa */
#define rnd(x)      (((x)+0x1000)&~0x1fff)
/* lsel: select left 21 bits */
#define lsel(v,a)   (((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a)   (((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a)  (((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a)  ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
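/* Worked example (illustrative): with a zero addend, splitting 0x12345678
   gives lsel = 0x2468a and rsel = 0x678, and (0x2468a << 11) | 0x678
   reconstructs 0x12345678.  The lrsel/rrsel forms round the addend to the
   nearest 8K so that the low part plus the rounding residue still fits the
   signed displacement of an ldo.  */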
#define mask(x,sz)  ((x) & ~((1<<(sz))-1))
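/* PA-RISC instructions do not store immediates as one contiguous field;
   the reassemble_* helpers below permute a linear 12/14/17/21-bit value
   into the scattered bit positions used by the corresponding instruction
   formats.  */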
static inline int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}
static inline int reassemble_14(int as14)
{
    return (((as14 & 0x1fff) << 1) |
            ((as14 & 0x2000) >> 13));
}
static inline int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}
static inline int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
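/* The hppa_patch* helpers below rewrite the immediate field of an
   instruction that has already been emitted, e.g. when resolving a
   reference into a code buffer.  Illustrative sketch only:

       uint32_t *insn = ...;            // e.g. an addil/ldil emitted earlier
       hppa_patch21l(insn, value, 0);   // install the left 21 bits of value

   A caller would typically flush the instruction cache over the patched
   range afterwards (see flush_icache_range above).  */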
static inline void hppa_patch21l(uint32_t *insn, int val, int addend)
{
    val = lrsel(val, addend);
    *insn = mask(*insn, 21) | reassemble_21(val);
}
static inline void hppa_patch14r(uint32_t *insn, int val, int addend)
{
    val = rrsel(val, addend);
    *insn = mask(*insn, 14) | reassemble_14(val);
}
static inline void hppa_patch17r(uint32_t *insn, int val, int addend)
{
    val = rrsel(val, addend);
    *insn = (*insn & ~0x1f1ffd) | reassemble_17(val);
}
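/* The *_dprel variants patch a value expressed relative to the global data
   pointer, which they read directly from %r27.  */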
static inline void hppa_patch21l_dprel(uint32_t *insn, int val, int addend)
{
    register unsigned int dp asm("r27");
    hppa_patch21l(insn, val - dp, addend);
}
static inline void hppa_patch14r_dprel(uint32_t *insn, int val, int addend)
{
    register unsigned int dp asm("r27");
    hppa_patch14r(insn, val - dp, addend);
}
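/* hppa_patch17f (below) installs a PC-relative branch displacement: "dot" is
   the address of the branch itself, and the "- 8" reflects the PA-RISC rule
   that branch displacements are measured from the branch address plus 8.  */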
static inline void hppa_patch17f(uint32_t *insn, int val, int addend)
{
    int dot = (int)insn & ~0x3;
    int v = ((val + addend) - dot - 8) / 4;
    if (v > (1 << 16) || v < -(1 << 16)) {
        printf("cannot fit branch to offset %d [%08x->%08x]\n", v, dot, val);
        abort();
    }
    *insn = (*insn & ~0x1f1ffd) | reassemble_17(v);
}
static inline void hppa_load_imm21l(uint32_t *insn, int val, int addend)
{
    /* Transform addil L'sym(%dp) to ldil L'val, %r1 */
    *insn = 0x20200000 | reassemble_21(lrsel(val, 0));
}
static inline void hppa_load_imm14r(uint32_t *insn, int val, int addend)
{
    /* Transform ldw R'sym(%r1), %rN to ldo R'sym(%r1), %rN */
    hppa_patch14r(insn, val, addend);
    /* HACK */
    if (addend == 0)
        *insn = (*insn & ~0xfc000000) | (0x0d << 26);
}
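/* Illustrative sketch (not part of the original header): a relocation
   handler built on these helpers might dispatch on the ELF relocation type
   roughly as follows; the case labels are standard hppa relocation names
   and the variables are hypothetical.

       switch (type) {
       case R_PARISC_DIR21L:   hppa_patch21l(insn, sym, addend);  break;
       case R_PARISC_DIR14R:   hppa_patch14r(insn, sym, addend);  break;
       case R_PARISC_PCREL17F: hppa_patch17f(insn, sym, addend);  break;
       }
 */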