/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"
#include "tcg.h"

int __op_param1, __op_param2, __op_param3;
#if defined(__sparc__) || defined(__arm__)
void __op_gen_label1(void) {}
void __op_gen_label2(void) {}
void __op_gen_label3(void) {}
#else
int __op_gen_label1, __op_gen_label2, __op_gen_label3;
#endif
int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
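
/* The __op_param*, __op_gen_label* and __op_jmp* symbols above are never
   used at run time: they exist so that the compiled op bodies carry
   relocations against well-known symbol names.  dyngen locates those
   relocations in the object file, and the generated code in op.h later
   patches the copied instructions with the real operand values and jump
   targets.

   flush_icache_range() makes freshly generated code visible to the
   instruction stream.  x86-class hosts keep instruction and data caches
   coherent in hardware, so their implementation is empty; other hosts must
   flush explicitly before executing the new code. */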
#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
}
#elif defined(__ia64__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    while (start < stop) {
        asm volatile ("fc %0" :: "r"(start));
        start += 32;
    }
    asm volatile (";;sync.i;;srlz.i;;");
}
#elif defined(__powerpc__)

#define MIN_CACHE_LINE_SIZE 8 /* conservative value */

static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    start &= ~(MIN_CACHE_LINE_SIZE - 1);
    stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);

    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
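
/* The sequence above is the canonical PowerPC self-modifying-code dance:
   dcbst pushes the new instructions out of the data cache, sync waits for
   the stores to reach memory, icbi invalidates the stale instruction-cache
   lines, and the trailing sync/isync pair keeps the CPU from executing
   already-prefetched stale instructions. */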
#elif defined(__alpha__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    asm ("imb");
}
#elif defined(__sparc__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    p = start & ~(8UL - 1UL);
    stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);

    for (; p < stop; p += 8)
        __asm__ __volatile__("flush\t%0" : : "r" (p));
}
#elif defined(__arm__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    register unsigned long _beg __asm ("a1") = start;
    register unsigned long _end __asm ("a2") = stop;
    register unsigned long _flg __asm ("a3") = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}
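
/* 0x9f0002 is __ARM_NR_cacheflush, an ARM-Linux-private syscall: user
   space cannot operate on the caches directly, so the kernel performs the
   flush over the requested range on our behalf. */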
#elif defined(__mc68000)

# include <asm/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    cacheflush(start, FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, stop - start + 16);
}
#elif defined(__mips__)

#include <sys/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    _flush_cache ((void *)start, stop - start, BCACHE);
}
#else
#error unsupported CPU
#endif
#ifdef __alpha__

register int gp asm("$29");
static inline void immediate_ldah(void *p, int val) {
    uint32_t *dest = p;
    long high = ((val >> 16) + ((val >> 15) & 1)) & 0xffff;

    *dest &= ~0xffff;
    *dest |= high;
    *dest |= 31 << 16;
}
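
/* Alpha materializes a 32-bit constant as ldah (high 16 bits, shifted left
   by 16) followed by lda (low 16 bits, sign-extended).  Because lda
   sign-extends, a set bit 15 makes the low half act as a negative offset;
   adding ((val >> 15) & 1) to the high half compensates for that. */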
static inline void immediate_lda(void *dest, int val) {
    *(uint16_t *) dest = val;
}
void fix_bsr(void *p, int offset) {
    uint32_t *dest = p;

    *dest &= ~((1 << 21) - 1);
    *dest |= (offset >> 2) & ((1 << 21) - 1);
}

#endif /* __alpha__ */

#ifdef __ia64__
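
/* IA-64 packs instructions into 16-byte bundles: a 5-bit template field
   followed by three 41-bit instruction slots.  The helpers below take an
   address whose low bits select the slot, hence the
   "5 + 41 * (insn_addr % 16)" bit-offset computation in ia64_patch(). */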
/* Patch instruction with "val" where "mask" has 1 bits. */
static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
{
    uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
#   define insn_mask ((1UL << 41) - 1)
    unsigned long shift;

    b0 = b[0]; b1 = b[1];
    shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */
    if (shift >= 64) {
        m1 = mask << (shift - 64);
        v1 = val << (shift - 64);
    } else {
        m0 = mask << shift; m1 = mask >> (64 - shift);
        v0 = val << shift; v1 = val >> (64 - shift);
        b[0] = (b0 & ~m0) | (v0 & m0);
    }
    b[1] = (b1 & ~m1) | (v1 & m1);
}
static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
{
    ia64_patch(insn_addr,
               0x011ffffe000UL,
               (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
    ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
}
static inline void ia64_imm64 (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC).  */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    ia64_patch(insn_addr + 2,
               0x01fffefe000UL,
               (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */)
               );
    ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}
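
/* A movl 64-bit immediate occupies two slots of its bundle: slot 1 holds
   bits 22-62 of the value (the val >> 22 patch above) and slot 2 holds the
   remaining bits scattered into the instruction's immediate fields, which
   is why both insn_addr + 1 and insn_addr + 2 are patched. */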
static inline void ia64_imm60b (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC).  */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
        fprintf(stderr, "%s: value %ld out of IMM60 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch_imm60(insn_addr + 2, val);
}
static inline void ia64_imm22 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
}
/* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has
   the effect of turning "addl rX=imm22,rY" into "addl rX=imm22,r0". */
static inline void ia64_imm22_r0 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
}
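
/* Register r0 always reads as zero on IA-64, so rewriting the source
   register of an addl to r0 turns a GOT-relative address computation into
   a plain "materialize this 22-bit constant" instruction.  This is what
   makes the relaxation in ia64_patch_ltoff() below possible. */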
static inline void ia64_imm21b (void *insn, uint64_t val)
{
    if (val + (1 << 20) >= (1 << 21))
        fprintf(stderr, "%s: value %li out of IMM21b range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x11ffffe000UL,
               (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
                | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
}
static inline void ia64_nop_b (void *insn)
{
    ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
}
static inline void ia64_ldxmov(void *insn, uint64_t val)
{
    if (val + (1 << 21) < (1 << 22))
        ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
}
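
/* ia64_ldxmov() relaxes a GOT load: when the value fits in a signed 22-bit
   immediate, the ld8 is rewritten into a mov-style ALU instruction (major
   opcode 8) so the constant is materialized directly instead of being
   loaded from memory; otherwise the instruction is left untouched. */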
static inline int ia64_patch_ltoff(void *insn, uint64_t val,
                                   int relaxable)
{
    if (relaxable && (val + (1 << 21) < (1 << 22))) {
        ia64_imm22_r0(insn, val);
        return 0;
    }
    return 1;
}
struct ia64_fixup {
    struct ia64_fixup *next;
    void *addr;                 /* address that needs to be patched */
    long value;
};
#define IA64_PLT(insn, plt_index)                       \
do {                                                    \
    struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
    fixup->next = plt_fixes;                            \
    plt_fixes = fixup;                                  \
    fixup->addr = (insn);                               \
    fixup->value = (plt_index);                         \
    plt_offset[(plt_index)] = 1;                        \
} while (0)
#define IA64_LTOFF(insn, val, relaxable)                    \
do {                                                        \
    if (ia64_patch_ltoff(insn, val, relaxable)) {           \
        struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
        fixup->next = ltoff_fixes;                          \
        ltoff_fixes = fixup;                                \
        fixup->addr = (insn);                               \
        fixup->value = (val);                               \
    }                                                       \
} while (0)
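
/* Both macros record a fixup on the caller's stack frame via alloca(); the
   lists stay valid because ia64_apply_fixes() runs before the recording
   function returns.  PLT fixups carry a plt_index, LTOFF fixups carry the
   64-bit value that must end up in the GOT. */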
static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
                                     struct ia64_fixup *ltoff_fixes,
                                     uint64_t gp,
                                     struct ia64_fixup *plt_fixes,
                                     int num_plts,
                                     unsigned long *plt_target,
                                     unsigned int *plt_offset)
{
    static const uint8_t plt_bundle[] = {
        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
        0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,

        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
    };
    uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start;
    uint64_t *vp;
    struct ia64_fixup *fixup;
    unsigned int offset = 0;
    struct fdesc {
        long ip;
        long gp;
    } *fdesc;
    int i;
    if (plt_fixes) {
        plt_start = gen_code_ptr;

        for (i = 0; i < num_plts; ++i) {
            if (plt_offset[i]) {
                plt_offset[i] = offset;
                offset += sizeof(plt_bundle);

                fdesc = (struct fdesc *) plt_target[i];
                memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
                ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
                ia64_imm60b(gen_code_ptr + 0x12,
                            (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
                gen_code_ptr += sizeof(plt_bundle);
            }
        }

        for (fixup = plt_fixes; fixup; fixup = fixup->next)
            ia64_imm21b(fixup->addr,
                        ((long) plt_start + plt_offset[fixup->value]
                         - ((long) fixup->addr & ~0xf)) >> 4);
    }
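
    /* Each PLT entry emitted above is a two-bundle trampoline: it loads
       the callee's gp from its function descriptor (movl r1=GP) and then
       long-branches (brl) to the entry point, mirroring what an indirect
       call through the descriptor would do. */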
    got_start = gen_code_ptr;

    /* First, create the GOT: */
    for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
        /* first check if we already have this value in the GOT: */
        for (vp = (uint64_t *) got_start; vp < (uint64_t *) gen_code_ptr; ++vp)
            if (*vp == fixup->value)
                break;
        if (vp == (uint64_t *) gen_code_ptr) {
            /* Nope, we need to put the value in the GOT: */
            *vp = fixup->value;
            gen_code_ptr += 8;
        }
        ia64_imm22(fixup->addr, (long) vp - gp);
    }
    /* Keep code ptr aligned. */
    if ((long) gen_code_ptr & 15)
        gen_code_ptr += 8;
    *gen_code_pp = gen_code_ptr;
}

#endif /* __ia64__ */
#ifdef CONFIG_DYNGEN_OP

#if defined(__hppa__)
struct hppa_branch_stub {
    uint32_t *location;
    long target;
    struct hppa_branch_stub *next;
};
#define HPPA_RECORD_BRANCH(LIST, LOC, TARGET)                                \
do {                                                                         \
    struct hppa_branch_stub *stub = alloca(sizeof(struct hppa_branch_stub)); \
    stub->location = LOC;                                                    \
    stub->target = TARGET;                                                   \
    stub->next = LIST;                                                       \
    LIST = stub;                                                             \
} while (0)
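
/* HPPA conditional branches have a narrow displacement (note the 17-bit
   patch helper used below), so a branch whose target may be out of range
   is recorded here and later routed through an ldil/be,n stub that can
   reach the full address space. */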
static inline void hppa_process_stubs(struct hppa_branch_stub *stub,
                                      uint8_t **gen_code_pp)
{
    uint32_t *s = (uint32_t *)*gen_code_pp;
    uint32_t *p = s + 1;

    if (!stub) return;

    for (; stub != NULL; stub = stub->next) {
        unsigned long l = (unsigned long)p;
        /* stub:
         * ldil L'target, %r1
         * be,n R'target(%sr4,%r1)
         */
        *p++ = 0x20200000 | reassemble_21(lrsel(stub->target, 0));
        *p++ = 0xe0202002 | (reassemble_17(rrsel(stub->target, 0) >> 2));
        hppa_patch17f(stub->location, l, 0);
    }
    /* b,l,n <label>, %r0 */
    *s = 0xe8000002 | reassemble_17((p - s) - 2);
    *gen_code_pp = (uint8_t *)p;
}
#endif /* __hppa__ */
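
/* dyngen_op() is the core of the legacy dyngen backend: for each TCG op it
   copies the precompiled body of the corresponding C "op" function into
   the code buffer and patches the __op_param relocations with the actual
   arguments.  The per-op cases come from op.h, generated at build time by
   dyngen.c from op.c. */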
const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr)
{
    uint8_t *gen_code_ptr;

#ifdef __hppa__
    struct hppa_branch_stub *hppa_stubs = NULL;
#endif

    gen_code_ptr = s->code_ptr;
    switch(opc) {

/* op.h is dynamically generated by dyngen.c from op.c */
#include "op.h"

    default:
        tcg_abort();
    }

#ifdef __hppa__
    hppa_process_stubs(hppa_stubs, &gen_code_ptr);
#endif

    s->code_ptr = gen_code_ptr;
    return opparam_ptr;
}

#endif /* CONFIG_DYNGEN_OP */