[tinycc/self_contained.git] / arm64-gen.c
1 /*
2 * A64 code generator for TCC
4 * Copyright (c) 2014-2015 Edmund Grimley Evans
6 * Copying and distribution of this file, with or without modification,
7 * are permitted in any medium without royalty provided the copyright
8 * notice and this notice are preserved. This file is offered as-is,
9 * without any warranty.
12 #ifdef TARGET_DEFS_ONLY
14 // Number of registers available to allocator:
15 #define NB_REGS 28 // x0-x18, x30, v0-v7
17 #define TREG_R(x) (x) // x = 0..18
18 #define TREG_R30 19
19 #define TREG_F(x) (x + 20) // x = 0..7
21 // Register classes sorted from more general to more precise:
22 #define RC_INT (1 << 0)
23 #define RC_FLOAT (1 << 1)
24 #define RC_R(x) (1 << (2 + (x))) // x = 0..18
25 #define RC_R30 (1 << 21)
26 #define RC_F(x) (1 << (22 + (x))) // x = 0..7
28 #define RC_IRET (RC_R(0)) // int return register class
29 #define RC_FRET (RC_F(0)) // float return register class
31 #define REG_IRET (TREG_R(0)) // int return register number
32 #define REG_FRET (TREG_F(0)) // float return register number
34 #define PTR_SIZE 8
36 #define LDOUBLE_SIZE 16
37 #define LDOUBLE_ALIGN 16
39 #define MAX_ALIGN 16
41 #define CHAR_IS_UNSIGNED
43 /* define if return values need to be extended explicitly
44 at caller side (for interfacing with non-TCC compilers) */
45 #define PROMOTE_RET
46 /******************************************************/
47 #else /* ! TARGET_DEFS_ONLY */
48 /******************************************************/
49 #define USING_GLOBALS
50 #include "tcc.h"
51 #include <assert.h>
53 ST_DATA const int reg_classes[NB_REGS] = {
54 RC_INT | RC_R(0),
55 RC_INT | RC_R(1),
56 RC_INT | RC_R(2),
57 RC_INT | RC_R(3),
58 RC_INT | RC_R(4),
59 RC_INT | RC_R(5),
60 RC_INT | RC_R(6),
61 RC_INT | RC_R(7),
62 RC_INT | RC_R(8),
63 RC_INT | RC_R(9),
64 RC_INT | RC_R(10),
65 RC_INT | RC_R(11),
66 RC_INT | RC_R(12),
67 RC_INT | RC_R(13),
68 RC_INT | RC_R(14),
69 RC_INT | RC_R(15),
70 RC_INT | RC_R(16),
71 RC_INT | RC_R(17),
72 RC_INT | RC_R(18),
73 RC_R30, // not in RC_INT as we make special use of x30
74 RC_FLOAT | RC_F(0),
75 RC_FLOAT | RC_F(1),
76 RC_FLOAT | RC_F(2),
77 RC_FLOAT | RC_F(3),
78 RC_FLOAT | RC_F(4),
79 RC_FLOAT | RC_F(5),
80 RC_FLOAT | RC_F(6),
81 RC_FLOAT | RC_F(7)
84 #if defined(CONFIG_TCC_BCHECK)
85 static addr_t func_bound_offset;
86 static unsigned long func_bound_ind;
87 ST_DATA int func_bound_add_epilog;
88 #endif
90 #define IS_FREG(x) ((x) >= TREG_F(0))
92 static uint32_t intr(int r)
94 assert(TREG_R(0) <= r && r <= TREG_R30);
95 return r < TREG_R30 ? r : 30;
98 static uint32_t fltr(int r)
100 assert(TREG_F(0) <= r && r <= TREG_F(7));
101 return r - TREG_F(0);
104 // Add an instruction to text section:
105 ST_FUNC void o(unsigned int c)
107 int ind1 = ind + 4;
108 if (nocode_wanted)
109 return;
110 if (ind1 > cur_text_section->data_allocated)
111 section_realloc(cur_text_section, ind1);
112 write32le(cur_text_section->data + ind, c);
113 ind = ind1;
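/* Try to encode x as an AArch64 "bitmask immediate" (the N:immr:imms field
   used by the immediate forms of AND/ORR/EOR): a run of ones, rotated, and
   replicated across the register with a period of 2, 4, 8, 16, 32 or 64 bits.
   Returns the 13-bit encoding, or -1 if x has no such representation
   (e.g. 0, ~0, or a non-repeating pattern such as 0x1234). */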
116 static int arm64_encode_bimm64(uint64_t x)
118 int neg = x & 1;
119 int rep, pos, len;
121 if (neg)
122 x = ~x;
123 if (!x)
124 return -1;
126 if (x >> 2 == (x & (((uint64_t)1 << (64 - 2)) - 1)))
127 rep = 2, x &= ((uint64_t)1 << 2) - 1;
128 else if (x >> 4 == (x & (((uint64_t)1 << (64 - 4)) - 1)))
129 rep = 4, x &= ((uint64_t)1 << 4) - 1;
130 else if (x >> 8 == (x & (((uint64_t)1 << (64 - 8)) - 1)))
131 rep = 8, x &= ((uint64_t)1 << 8) - 1;
132 else if (x >> 16 == (x & (((uint64_t)1 << (64 - 16)) - 1)))
133 rep = 16, x &= ((uint64_t)1 << 16) - 1;
134 else if (x >> 32 == (x & (((uint64_t)1 << (64 - 32)) - 1)))
135 rep = 32, x &= ((uint64_t)1 << 32) - 1;
136 else
137 rep = 64;
139 pos = 0;
140 if (!(x & (((uint64_t)1 << 32) - 1))) x >>= 32, pos += 32;
141 if (!(x & (((uint64_t)1 << 16) - 1))) x >>= 16, pos += 16;
142 if (!(x & (((uint64_t)1 << 8) - 1))) x >>= 8, pos += 8;
143 if (!(x & (((uint64_t)1 << 4) - 1))) x >>= 4, pos += 4;
144 if (!(x & (((uint64_t)1 << 2) - 1))) x >>= 2, pos += 2;
145 if (!(x & (((uint64_t)1 << 1) - 1))) x >>= 1, pos += 1;
147 len = 0;
148 if (!(~x & (((uint64_t)1 << 32) - 1))) x >>= 32, len += 32;
149 if (!(~x & (((uint64_t)1 << 16) - 1))) x >>= 16, len += 16;
150 if (!(~x & (((uint64_t)1 << 8) - 1))) x >>= 8, len += 8;
151 if (!(~x & (((uint64_t)1 << 4) - 1))) x >>= 4, len += 4;
152 if (!(~x & (((uint64_t)1 << 2) - 1))) x >>= 2, len += 2;
153 if (!(~x & (((uint64_t)1 << 1) - 1))) x >>= 1, len += 1;
155 if (x)
156 return -1;
157 if (neg) {
158 pos = (pos + len) & (rep - 1);
159 len = rep - len;
161 return ((0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) |
162 ((rep - pos) & (rep - 1)) << 6 | (len - 1));
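/* Return a single instruction that puts the constant x into register r, if
   one exists: MOVZ or MOVN with the 16-bit immediate in any halfword
   position, or a bitmask-immediate MOV (ORR with the zero register) via
   arm64_encode_bimm64. Returns 0 when no single instruction suffices. */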
165 static uint32_t arm64_movi(int r, uint64_t x)
167 uint64_t m = 0xffff;
168 int e;
169 if (!(x & ~m))
170 return 0x52800000 | r | x << 5; // movz w(r),#(x)
171 if (!(x & ~(m << 16)))
172 return 0x52a00000 | r | x >> 11; // movz w(r),#(x >> 16),lsl #16
173 if (!(x & ~(m << 32)))
174 return 0xd2c00000 | r | x >> 27; // movz x(r),#(x >> 32),lsl #32
175 if (!(x & ~(m << 48)))
176 return 0xd2e00000 | r | x >> 43; // movz x(r),#(x >> 48),lsl #48
177 if ((x & ~m) == m << 16)
178 return (0x12800000 | r |
179 (~x << 5 & 0x1fffe0)); // movn w(r),#(~x)
180 if ((x & ~(m << 16)) == m)
181 return (0x12a00000 | r |
182 (~x >> 11 & 0x1fffe0)); // movn w(r),#(~x >> 16),lsl #16
183 if (!~(x | m))
184 return (0x92800000 | r |
185 (~x << 5 & 0x1fffe0)); // movn x(r),#(~x)
186 if (!~(x | m << 16))
187 return (0x92a00000 | r |
188 (~x >> 11 & 0x1fffe0)); // movn x(r),#(~x >> 16),lsl #16
189 if (!~(x | m << 32))
190 return (0x92c00000 | r |
191 (~x >> 27 & 0x1fffe0)); // movn x(r),#(~x >> 32),lsl #32
192 if (!~(x | m << 48))
193 return (0x92e00000 | r |
194 (~x >> 43 & 0x1fffe0)); // movn x(r),#(~x >> 48),lsl #48
195 if (!(x >> 32) && (e = arm64_encode_bimm64(x | x << 32)) >= 0)
196 return 0x320003e0 | r | (uint32_t)e << 10; // movi w(r),#(x)
197 if ((e = arm64_encode_bimm64(x)) >= 0)
198 return 0xb20003e0 | r | (uint32_t)e << 10; // movi x(r),#(x)
199 return 0;
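/* Materialize an arbitrary 64-bit constant in x(r): use one instruction when
   arm64_movi finds one, otherwise start from MOVZ or MOVN (whichever leaves
   fewer halfwords to fix up) and patch the remaining 16-bit chunks with MOVK. */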
202 static void arm64_movimm(int r, uint64_t x)
204 uint32_t i;
205 if ((i = arm64_movi(r, x)))
206 o(i); // a single MOV
207 else {
208 // MOVZ/MOVN and 1-3 MOVKs
209 int z = 0, m = 0;
210 uint32_t mov1 = 0xd2800000; // movz
211 uint64_t x1 = x;
212 for (i = 0; i < 64; i += 16) {
213 z += !(x >> i & 0xffff);
214 m += !(~x >> i & 0xffff);
216 if (m > z) {
217 x1 = ~x;
218 mov1 = 0x92800000; // movn
220 for (i = 0; i < 64; i += 16)
221 if (x1 >> i & 0xffff) {
222 o(mov1 | r | (x1 >> i & 0xffff) << 5 | i << 17);
223 // movz/movn x(r),#(*),lsl #(i)
224 break;
226 for (i += 16; i < 64; i += 16)
227 if (x1 >> i & 0xffff)
228 o(0xf2800000 | r | (x >> i & 0xffff) << 5 | i << 17);
229 // movk x(r),#(*),lsl #(i)
233 // Patch all branches in list pointed to by t to branch to a:
234 ST_FUNC void gsym_addr(int t_, int a_)
236 uint32_t t = t_;
237 uint32_t a = a_;
238 while (t) {
239 unsigned char *ptr = cur_text_section->data + t;
240 uint32_t next = read32le(ptr);
241 if (a - t + 0x8000000 >= 0x10000000)
242 tcc_error("branch out of range");
243 write32le(ptr, (a - t == 4 ? 0xd503201f : // nop
244 0x14000000 | ((a - t) >> 2 & 0x3ffffff))); // b
245 t = next;
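/* Return the log2 of the access size for a value of type t:
   0 = byte, 1 = halfword, 2 = word, 3 = doubleword, 4 = 16 bytes. */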
249 static int arm64_type_size(int t)
252 * case values are in increasing order (from 1 to 11),
253 * which 'may' help compiler optimizers. See tcc.h
255 switch (t & VT_BTYPE) {
256 case VT_BYTE: return 0;
257 case VT_SHORT: return 1;
258 case VT_INT: return 2;
259 case VT_LLONG: return 3;
260 case VT_PTR: return 3;
261 case VT_FUNC: return 3;
262 case VT_STRUCT: return 3;
263 case VT_FLOAT: return 2;
264 case VT_DOUBLE: return 3;
265 case VT_LDOUBLE: return 4;
266 case VT_BOOL: return 0;
268 assert(0);
269 return 0;
272 static void arm64_spoff(int reg, uint64_t off)
274 uint32_t sub = off >> 63;
275 if (sub)
276 off = -off;
277 if (off < 4096)
278 o(0x910003e0 | sub << 30 | reg | off << 10);
279 // (add|sub) x(reg),sp,#(off)
280 else {
281 arm64_movimm(30, off); // use x30 for offset
282 o(0x8b3e63e0 | sub << 30 | reg); // (add|sub) x(reg),sp,x30
286 /* invert 0: return value to use for arm64_sym (folded into the symbol address) */
287 /* invert 1: return value to use as the store/load displacement */
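/* Split an offset that may be too large for a load/store addressing mode:
   the invert=0 part is folded into the symbol address computed by arm64_sym,
   and the invert=1 part is what remains for the load/store itself (either a
   scaled unsigned 12-bit offset or a signed 9-bit ldur/stur offset). */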
288 static uint64_t arm64_check_offset(int invert, int sz_, uint64_t off)
290 uint32_t sz = sz_;
291 if (!(off & ~((uint32_t)0xfff << sz)) ||
292 (off < 256 || -off <= 256))
293 return invert ? off : 0ul;
294 else if ((off & ((uint32_t)0xfff << sz)))
295 return invert ? off & ((uint32_t)0xfff << sz)
296 : off & ~((uint32_t)0xfff << sz);
297 else if (off & 0x1ff)
298 return invert ? off & 0x1ff : off & ~0x1ff;
299 else
300 return invert ? 0ul : off;
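/* Emit an integer load of 2^sz bytes from [x(bas) + off] into x(dst),
   sign-extending when sg is set (sub-word sizes only). The addressing form
   is chosen to fit the offset: LDR with a scaled unsigned 12-bit immediate,
   LDUR with a signed 9-bit immediate, or a register-offset LDR with the
   offset built in x30. arm64_ldrv and the arm64_strx/arm64_strv stores
   below follow the same scheme for SIMD/FP registers and for stores. */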
303 static void arm64_ldrx(int sg, int sz_, int dst, int bas, uint64_t off)
305 uint32_t sz = sz_;
306 if (sz >= 2)
307 sg = 0;
308 if (!(off & ~((uint32_t)0xfff << sz)))
309 o(0x39400000 | dst | bas << 5 | off << (10 - sz) |
310 (uint32_t)!!sg << 23 | sz << 30); // ldr(*) x(dst),[x(bas),#(off)]
311 else if (off < 256 || -off <= 256)
312 o(0x38400000 | dst | bas << 5 | (off & 511) << 12 |
313 (uint32_t)!!sg << 23 | sz << 30); // ldur(*) x(dst),[x(bas),#(off)]
314 else {
315 arm64_movimm(30, off); // use x30 for offset
316 o(0x38206800 | dst | bas << 5 | (uint32_t)30 << 16 |
317 (uint32_t)(!!sg + 1) << 22 | sz << 30); // ldr(*) x(dst),[x(bas),x30]
321 static void arm64_ldrv(int sz_, int dst, int bas, uint64_t off)
323 uint32_t sz = sz_;
324 if (!(off & ~((uint32_t)0xfff << sz)))
325 o(0x3d400000 | dst | bas << 5 | off << (10 - sz) |
326 (sz & 4) << 21 | (sz & 3) << 30); // ldr (s|d|q)(dst),[x(bas),#(off)]
327 else if (off < 256 || -off <= 256)
328 o(0x3c400000 | dst | bas << 5 | (off & 511) << 12 |
329 (sz & 4) << 21 | (sz & 3) << 30); // ldur (s|d|q)(dst),[x(bas),#(off)]
330 else {
331 arm64_movimm(30, off); // use x30 for offset
332 o(0x3c606800 | dst | bas << 5 | (uint32_t)30 << 16 |
333 sz << 30 | (sz & 4) << 21); // ldr (s|d|q)(dst),[x(bas),x30]
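/* Load a small aggregate of 'size' bytes, whose address is in x(reg), into
   x(reg) itself (and x(reg+1) for sizes 9..16), assembling non-power-of-two
   sizes from smaller loads, shifts and ORs; x30 is used as scratch. This is
   how small structs are placed in general-purpose registers for passing and
   returning. */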
337 static void arm64_ldrs(int reg_, int size)
339 uint32_t reg = reg_;
340 // Use x30 for intermediate value in some cases.
341 switch (size) {
342 default: assert(0); break;
343 case 0:
344 /* Can happen with zero size structs */
345 break;
346 case 1:
347 arm64_ldrx(0, 0, reg, reg, 0);
348 break;
349 case 2:
350 arm64_ldrx(0, 1, reg, reg, 0);
351 break;
352 case 3:
353 arm64_ldrx(0, 1, 30, reg, 0);
354 arm64_ldrx(0, 0, reg, reg, 2);
355 o(0x2a0043c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #16
356 break;
357 case 4:
358 arm64_ldrx(0, 2, reg, reg, 0);
359 break;
360 case 5:
361 arm64_ldrx(0, 2, 30, reg, 0);
362 arm64_ldrx(0, 0, reg, reg, 4);
363 o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
364 break;
365 case 6:
366 arm64_ldrx(0, 2, 30, reg, 0);
367 arm64_ldrx(0, 1, reg, reg, 4);
368 o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
369 break;
370 case 7:
371 arm64_ldrx(0, 2, 30, reg, 0);
372 arm64_ldrx(0, 2, reg, reg, 3);
373 o(0x53087c00 | reg | reg << 5); // lsr w(reg), w(reg), #8
374 o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
375 break;
376 case 8:
377 arm64_ldrx(0, 3, reg, reg, 0);
378 break;
379 case 9:
380 arm64_ldrx(0, 0, reg + 1, reg, 8);
381 arm64_ldrx(0, 3, reg, reg, 0);
382 break;
383 case 10:
384 arm64_ldrx(0, 1, reg + 1, reg, 8);
385 arm64_ldrx(0, 3, reg, reg, 0);
386 break;
387 case 11:
388 arm64_ldrx(0, 2, reg + 1, reg, 7);
389 o(0x53087c00 | (reg+1) | (reg+1) << 5); // lsr w(reg+1), w(reg+1), #8
390 arm64_ldrx(0, 3, reg, reg, 0);
391 break;
392 case 12:
393 arm64_ldrx(0, 2, reg + 1, reg, 8);
394 arm64_ldrx(0, 3, reg, reg, 0);
395 break;
396 case 13:
397 arm64_ldrx(0, 3, reg + 1, reg, 5);
398 o(0xd358fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #24
399 arm64_ldrx(0, 3, reg, reg, 0);
400 break;
401 case 14:
402 arm64_ldrx(0, 3, reg + 1, reg, 6);
403 o(0xd350fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #16
404 arm64_ldrx(0, 3, reg, reg, 0);
405 break;
406 case 15:
407 arm64_ldrx(0, 3, reg + 1, reg, 7);
408 o(0xd348fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #8
409 arm64_ldrx(0, 3, reg, reg, 0);
410 break;
411 case 16:
412 o(0xa9400000 | reg | (reg+1) << 10 | reg << 5);
413 // ldp x(reg),x(reg+1),[x(reg)]
414 break;
418 static void arm64_strx(int sz_, int dst, int bas, uint64_t off)
420 uint32_t sz = sz_;
421 if (!(off & ~((uint32_t)0xfff << sz)))
422 o(0x39000000 | dst | bas << 5 | off << (10 - sz) | sz << 30);
423 // str(*) x(dst),[x(bas),#(off)]
424 else if (off < 256 || -off <= 256)
425 o(0x38000000 | dst | bas << 5 | (off & 511) << 12 | sz << 30);
426 // stur(*) x(dst),[x(bas),#(off)]
427 else {
428 arm64_movimm(30, off); // use x30 for offset
429 o(0x38206800 | dst | bas << 5 | (uint32_t)30 << 16 | sz << 30);
430 // str(*) x(dst),[x(bas),x30]
434 static void arm64_strv(int sz_, int dst, int bas, uint64_t off)
436 uint32_t sz = sz_;
437 if (!(off & ~((uint32_t)0xfff << sz)))
438 o(0x3d000000 | dst | bas << 5 | off << (10 - sz) |
439 (sz & 4) << 21 | (sz & 3) << 30); // str (s|d|q)(dst),[x(bas),#(off)]
440 else if (off < 256 || -off <= 256)
441 o(0x3c000000 | dst | bas << 5 | (off & 511) << 12 |
442 (sz & 4) << 21 | (sz & 3) << 30); // stur (s|d|q)(dst),[x(bas),#(off)]
443 else {
444 arm64_movimm(30, off); // use x30 for offset
445 o(0x3c206800 | dst | bas << 5 | (uint32_t)30 << 16 |
446 sz << 30 | (sz & 4) << 21); // str (s|d|q)(dst),[x(bas),x30]
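/* Load the address of sym + addend into x(r): ADRP plus LDR through the GOT
   (R_AARCH64_ADR_GOT_PAGE / R_AARCH64_LD64_GOT_LO12_NC relocations), then
   add the addend with one or two ADD immediates; addends above 24 bits go
   through a scratch register that is saved and restored around the add. */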
450 static void arm64_sym(int r, Sym *sym, unsigned long addend)
452 greloca(cur_text_section, sym, ind, R_AARCH64_ADR_GOT_PAGE, 0);
453 o(0x90000000 | r); // adrp xr, #sym
454 greloca(cur_text_section, sym, ind, R_AARCH64_LD64_GOT_LO12_NC, 0);
455 o(0xf9400000 | r | (r << 5)); // ld xr,[xr, #sym]
456 if (addend) {
457 // add xr, xr, #addend
458 if (addend & 0xffful)
459 o(0x91000000 | r | r << 5 | (addend & 0xfff) << 10);
460 if (addend > 0xffful) {
461 // add xr, xr, #addend, lsl #12
462 if (addend & 0xfff000ul)
463 o(0x91400000 | r | r << 5 | ((addend >> 12) & 0xfff) << 10);
464 if (addend > 0xfffffful) {
465 /* very unlikely */
466 int t = r ? 0 : 1;
467 o(0xf81f0fe0 | t); /* str xt, [sp, #-16]! */
468 arm64_movimm(t, addend & ~0xfffffful); // use xt for addend
469 o(0x8b000000 | r | r << 5 | t << 16); /* add xr, xr, xt */
470 o(0xf84107e0 | t); /* ldr xt, [sp], #16 */
476 static void arm64_load_cmp(int r, SValue *sv);
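/* load(): move the value described by sv into register r. Dispatches on
   sv->r: locals and other lvalues become loads relative to x29 or a base
   register, symbol references go through arm64_sym, plain constants through
   arm64_movimm, register-to-register moves use mov/fmov, and VT_CMP/VT_JMP
   values are materialized as 0 or 1. */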
478 ST_FUNC void load(int r, SValue *sv)
480 int svtt = sv->type.t;
481 int svr = sv->r & ~VT_BOUNDED;
482 int svrv = svr & VT_VALMASK;
483 uint64_t svcul = (uint32_t)sv->c.i;
484 svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
486 if (svr == (VT_LOCAL | VT_LVAL)) {
487 if (IS_FREG(r))
488 arm64_ldrv(arm64_type_size(svtt), fltr(r), 29, svcul);
489 else
490 arm64_ldrx(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
491 intr(r), 29, svcul);
492 return;
495 if (svr == (VT_CONST | VT_LVAL)) {
496 arm64_sym(30, sv->sym, // use x30 for address
497 arm64_check_offset(0, arm64_type_size(svtt), sv->c.i));
498 if (IS_FREG(r))
499 arm64_ldrv(arm64_type_size(svtt), fltr(r), 30,
500 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
501 else
502 arm64_ldrx(!(svtt&VT_UNSIGNED), arm64_type_size(svtt), intr(r), 30,
503 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
504 return;
507 if ((svr & ~VT_VALMASK) == VT_LVAL && svrv < VT_CONST) {
508 if ((svtt & VT_BTYPE) != VT_VOID) {
509 if (IS_FREG(r))
510 arm64_ldrv(arm64_type_size(svtt), fltr(r), intr(svrv), 0);
511 else
512 arm64_ldrx(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
513 intr(r), intr(svrv), 0);
515 return;
518 if (svr == (VT_CONST | VT_LVAL | VT_SYM)) {
519 arm64_sym(30, sv->sym, // use x30 for address
520 arm64_check_offset(0, arm64_type_size(svtt), svcul));
521 if (IS_FREG(r))
522 arm64_ldrv(arm64_type_size(svtt), fltr(r), 30,
523 arm64_check_offset(1, arm64_type_size(svtt), svcul));
524 else
525 arm64_ldrx(!(svtt&VT_UNSIGNED), arm64_type_size(svtt), intr(r), 30,
526 arm64_check_offset(1, arm64_type_size(svtt), svcul));
527 return;
530 if (svr == (VT_CONST | VT_SYM)) {
531 arm64_sym(intr(r), sv->sym, svcul);
532 return;
535 if (svr == VT_CONST) {
536 if ((svtt & VT_BTYPE) != VT_VOID)
537 arm64_movimm(intr(r), arm64_type_size(svtt) == 3 ?
538 sv->c.i : (uint32_t)svcul);
539 return;
542 if (svr < VT_CONST) {
543 if (IS_FREG(r) && IS_FREG(svr))
544 if (svtt == VT_LDOUBLE)
545 o(0x4ea01c00 | fltr(r) | fltr(svr) << 5);
546 // mov v(r).16b,v(svr).16b
547 else
548 o(0x1e604000 | fltr(r) | fltr(svr) << 5); // fmov d(r),d(svr)
549 else if (!IS_FREG(r) && !IS_FREG(svr))
550 o(0xaa0003e0 | intr(r) | intr(svr) << 16); // mov x(r),x(svr)
551 else
552 assert(0);
553 return;
556 if (svr == VT_LOCAL) {
557 if (-svcul < 0x1000)
558 o(0xd10003a0 | intr(r) | -svcul << 10); // sub x(r),x29,#...
559 else {
560 arm64_movimm(30, -svcul); // use x30 for offset
561 o(0xcb0003a0 | intr(r) | (uint32_t)30 << 16); // sub x(r),x29,x30
563 return;
566 if (svr == VT_JMP || svr == VT_JMPI) {
567 int t = (svr == VT_JMPI);
568 arm64_movimm(intr(r), t);
569 o(0x14000002); // b .+8
570 gsym(svcul);
571 arm64_movimm(intr(r), t ^ 1);
572 return;
575 if (svr == (VT_LLOCAL | VT_LVAL)) {
576 arm64_ldrx(0, 3, 30, 29, svcul); // use x30 for offset
577 if (IS_FREG(r))
578 arm64_ldrv(arm64_type_size(svtt), fltr(r), 30, 0);
579 else
580 arm64_ldrx(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
581 intr(r), 30, 0);
582 return;
585 if (svr == VT_CMP) {
586 arm64_load_cmp(r, sv);
587 return;
590 printf("load(%x, (%x, %x, %lx))\n", r, svtt, sv->r, (long)svcul);
591 assert(0);
594 ST_FUNC void store(int r, SValue *sv)
596 int svtt = sv->type.t;
597 int svr = sv->r & ~VT_BOUNDED;
598 int svrv = svr & VT_VALMASK;
599 uint64_t svcul = (uint32_t)sv->c.i;
600 svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
602 if (svr == (VT_LOCAL | VT_LVAL)) {
603 if (IS_FREG(r))
604 arm64_strv(arm64_type_size(svtt), fltr(r), 29, svcul);
605 else
606 arm64_strx(arm64_type_size(svtt), intr(r), 29, svcul);
607 return;
610 if (svr == (VT_CONST | VT_LVAL)) {
611 arm64_sym(30, sv->sym, // use x30 for address
612 arm64_check_offset(0, arm64_type_size(svtt), sv->c.i));
613 if (IS_FREG(r))
614 arm64_strv(arm64_type_size(svtt), fltr(r), 30,
615 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
616 else
617 arm64_strx(arm64_type_size(svtt), intr(r), 30,
618 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
619 return;
622 if ((svr & ~VT_VALMASK) == VT_LVAL && svrv < VT_CONST) {
623 if (IS_FREG(r))
624 arm64_strv(arm64_type_size(svtt), fltr(r), intr(svrv), 0);
625 else
626 arm64_strx(arm64_type_size(svtt), intr(r), intr(svrv), 0);
627 return;
630 if (svr == (VT_CONST | VT_LVAL | VT_SYM)) {
631 arm64_sym(30, sv->sym, // use x30 for address
632 arm64_check_offset(0, arm64_type_size(svtt), svcul));
633 if (IS_FREG(r))
634 arm64_strv(arm64_type_size(svtt), fltr(r), 30,
635 arm64_check_offset(1, arm64_type_size(svtt), svcul));
636 else
637 arm64_strx(arm64_type_size(svtt), intr(r), 30,
638 arm64_check_offset(1, arm64_type_size(svtt), svcul));
639 return;
642 printf("store(%x, (%x, %x, %lx))\n", r, svtt, sv->r, (long)svcul);
643 assert(0);
646 static void arm64_gen_bl_or_b(int b)
648 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST && (vtop->r & VT_SYM)) {
649 greloca(cur_text_section, vtop->sym, ind,
650 b ? R_AARCH64_JUMP26 : R_AARCH64_CALL26, 0);
651 o(0x14000000 | (uint32_t)!b << 31); // b/bl .
653 else {
654 #ifdef CONFIG_TCC_BCHECK
655 vtop->r &= ~VT_MUSTBOUND;
656 #endif
657 o(0xd61f0000 | (uint32_t)!b << 21 | intr(gv(RC_R30)) << 5); // br/blr
661 #if defined(CONFIG_TCC_BCHECK)
663 static void gen_bounds_call(int v)
665 Sym *sym = external_helper_sym(v);
667 greloca(cur_text_section, sym, ind, R_AARCH64_CALL26, 0);
668 o(0x94000000); // bl
671 static void gen_bounds_prolog(void)
673 /* leave some room for bound checking code */
674 func_bound_offset = lbounds_section->data_offset;
675 func_bound_ind = ind;
676 func_bound_add_epilog = 0;
677 o(0xd503201f); /* nop -> mov x0, lbound section pointer */
678 o(0xd503201f);
679 o(0xd503201f);
680 o(0xd503201f); /* nop -> call __bound_local_new */
683 static void gen_bounds_epilog(void)
685 addr_t saved_ind;
686 addr_t *bounds_ptr;
687 Sym *sym_data;
688 int offset_modified = func_bound_offset != lbounds_section->data_offset;
690 if (!offset_modified && !func_bound_add_epilog)
691 return;
693 /* add end of table info */
694 bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
695 *bounds_ptr = 0;
697 sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
698 func_bound_offset, lbounds_section->data_offset);
700 /* generate bound local allocation */
701 if (offset_modified) {
702 saved_ind = ind;
703 ind = func_bound_ind;
704 greloca(cur_text_section, sym_data, ind, R_AARCH64_ADR_GOT_PAGE, 0);
705 o(0x90000000 | 0); // adrp x0, #sym_data
706 greloca(cur_text_section, sym_data, ind, R_AARCH64_LD64_GOT_LO12_NC, 0);
707 o(0xf9400000 | 0 | (0 << 5)); // ld x0,[x0, #sym_data]
708 gen_bounds_call(TOK___bound_local_new);
709 ind = saved_ind;
712 /* generate bound check local freeing */
713 o(0xf81f0fe0); /* str x0, [sp, #-16]! */
714 o(0x3c9f0fe0); /* str q0, [sp, #-16]! */
715 greloca(cur_text_section, sym_data, ind, R_AARCH64_ADR_GOT_PAGE, 0);
716 o(0x90000000 | 0); // adrp x0, #sym_data
717 greloca(cur_text_section, sym_data, ind, R_AARCH64_LD64_GOT_LO12_NC, 0);
718 o(0xf9400000 | 0 | (0 << 5)); // ld x0,[x0, #sym_data]
719 gen_bounds_call(TOK___bound_local_delete);
720 o(0x3cc107e0); /* ldr q0, [sp], #16 */
721 o(0xf84107e0); /* ldr x0, [sp], #16 */
723 #endif
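/* Helper for arm64_hfa: walk a struct/union/array and check whether it is a
   Homogeneous Floating-point Aggregate in the AAPCS64 sense, i.e. at most
   four members that all have the same floating-point type (*fsize is that
   member size). Returns the member count reached so far, or -1 if the type
   does not qualify. */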
725 static int arm64_hfa_aux(CType *type, int *fsize, int num)
727 if (is_float(type->t)) {
728 int a, n = type_size(type, &a);
729 if (num >= 4 || (*fsize && *fsize != n))
730 return -1;
731 *fsize = n;
732 return num + 1;
734 else if ((type->t & VT_BTYPE) == VT_STRUCT) {
735 int is_struct = 0; // rather than union
736 Sym *field;
737 for (field = type->ref->next; field; field = field->next)
738 if (field->c) {
739 is_struct = 1;
740 break;
742 if (is_struct) {
743 int num0 = num;
744 for (field = type->ref->next; field; field = field->next) {
745 if (field->c != (num - num0) * *fsize)
746 return -1;
747 num = arm64_hfa_aux(&field->type, fsize, num);
748 if (num == -1)
749 return -1;
751 if (type->ref->c != (num - num0) * *fsize)
752 return -1;
753 return num;
755 else { // union
756 int num0 = num;
757 for (field = type->ref->next; field; field = field->next) {
758 int num1 = arm64_hfa_aux(&field->type, fsize, num0);
759 if (num1 == -1)
760 return -1;
761 num = num1 < num ? num : num1;
763 if (type->ref->c != (num - num0) * *fsize)
764 return -1;
765 return num;
768 else if ((type->t & VT_ARRAY) && ((type->t & VT_BTYPE) != VT_PTR)) {
769 int num1;
770 if (!type->ref->c)
771 return num;
772 num1 = arm64_hfa_aux(&type->ref->type, fsize, num);
773 if (num1 == -1 || (num1 != num && type->ref->c > 4))
774 return -1;
775 num1 = num + type->ref->c * (num1 - num);
776 if (num1 > 4)
777 return -1;
778 return num1;
780 return -1;
783 static int arm64_hfa(CType *type, unsigned *fsize)
785 if ((type->t & VT_BTYPE) == VT_STRUCT ||
786 ((type->t & VT_ARRAY) && ((type->t & VT_BTYPE) != VT_PTR))) {
787 int sz = 0;
788 int n = arm64_hfa_aux(type, &sz, 0);
789 if (0 < n && n <= 4) {
790 if (fsize)
791 *fsize = sz;
792 return n;
795 return 0;
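/* Assign locations to n values of the given types following the AAPCS64
   parameter-passing rules (the B.x/C.x comments below refer to the stages of
   that algorithm). The location is encoded in a[i]: 2*k (plus 1 when the
   value is passed as a pointer to a caller-made copy) for GP register x(k),
   16 + 2*k for SIMD/FP register v(k), and 32 + offset (plus 1 for a pointer)
   for a stack slot. Returns the number of stack bytes used. */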
798 static unsigned long arm64_pcs_aux(int n, CType **type, unsigned long *a)
800 int nx = 0; // next integer register
801 int nv = 0; // next vector register
802 unsigned long ns = 32; // next stack offset
803 int i;
805 for (i = 0; i < n; i++) {
806 int hfa = arm64_hfa(type[i], 0);
807 int size, align;
809 if ((type[i]->t & VT_ARRAY) ||
810 (type[i]->t & VT_BTYPE) == VT_FUNC)
811 size = align = 8;
812 else
813 size = type_size(type[i], &align);
815 if (hfa)
816 // B.2
818 else if (size > 16) {
819 // B.3: replace with pointer
820 if (nx < 8)
821 a[i] = nx++ << 1 | 1;
822 else {
823 ns = (ns + 7) & ~7;
824 a[i] = ns | 1;
825 ns += 8;
827 continue;
829 else if ((type[i]->t & VT_BTYPE) == VT_STRUCT)
830 // B.4
831 size = (size + 7) & ~7;
833 // C.1
834 if (is_float(type[i]->t) && nv < 8) {
835 a[i] = 16 + (nv++ << 1);
836 continue;
839 // C.2
840 if (hfa && nv + hfa <= 8) {
841 a[i] = 16 + (nv << 1);
842 nv += hfa;
843 continue;
846 // C.3
847 if (hfa) {
848 nv = 8;
849 size = (size + 7) & ~7;
852 // C.4
853 if (hfa || (type[i]->t & VT_BTYPE) == VT_LDOUBLE) {
854 ns = (ns + 7) & ~7;
855 ns = (ns + align - 1) & -align;
858 // C.5
859 if ((type[i]->t & VT_BTYPE) == VT_FLOAT)
860 size = 8;
862 // C.6
863 if (hfa || is_float(type[i]->t)) {
864 a[i] = ns;
865 ns += size;
866 continue;
869 // C.7
870 if ((type[i]->t & VT_BTYPE) != VT_STRUCT && size <= 8 && nx < 8) {
871 a[i] = nx++ << 1;
872 continue;
875 // C.8
876 if (align == 16)
877 nx = (nx + 1) & ~1;
879 // C.9
880 if ((type[i]->t & VT_BTYPE) != VT_STRUCT && size == 16 && nx < 7) {
881 a[i] = nx << 1;
882 nx += 2;
883 continue;
886 // C.10
887 if ((type[i]->t & VT_BTYPE) == VT_STRUCT && size <= (8 - nx) * 8) {
888 a[i] = nx << 1;
889 nx += (size + 7) >> 3;
890 continue;
893 // C.11
894 nx = 8;
896 // C.12
897 ns = (ns + 7) & ~7;
898 ns = (ns + align - 1) & -align;
900 // C.13
901 if ((type[i]->t & VT_BTYPE) == VT_STRUCT) {
902 a[i] = ns;
903 ns += size;
904 continue;
907 // C.14
908 if (size < 8)
909 size = 8;
911 // C.15
912 a[i] = ns;
913 ns += size;
916 return ns - 32;
919 static unsigned long arm64_pcs(int n, CType **type, unsigned long *a)
921 unsigned long stack;
923 // Return type:
924 if ((type[0]->t & VT_BTYPE) == VT_VOID)
925 a[0] = -1;
926 else {
927 arm64_pcs_aux(1, type, a);
928 assert(a[0] == 0 || a[0] == 1 || a[0] == 16);
931 // Argument types:
932 stack = arm64_pcs_aux(n, type + 1, a + 1);
934 if (0) {
935 int i;
936 for (i = 0; i <= n; i++) {
937 if (!i)
938 printf("arm64_pcs return: ");
939 else
940 printf("arm64_pcs arg %d: ", i);
941 if (a[i] == (unsigned long)-1)
942 printf("void\n");
943 else if (a[i] == 1 && !i)
944 printf("X8 pointer\n");
945 else if (a[i] < 16)
946 printf("X%lu%s\n", a[i] / 2, a[i] & 1 ? " pointer" : "");
947 else if (a[i] < 32)
948 printf("V%lu\n", a[i] / 2 - 8);
949 else
950 printf("stack %lu%s\n",
951 (a[i] - 32) & ~1, a[i] & 1 ? " pointer" : "");
955 return stack;
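/* Generate a function call with nb_args arguments taken from the value
   stack. Locations come from arm64_pcs: the stack area (including copies of
   large structs passed by reference) is filled first, then the argument
   registers, then the call is emitted and SP restored; a struct returned in
   registers is finally copied out to memory through the saved address. */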
958 ST_FUNC void gfunc_call(int nb_args)
960 CType *return_type;
961 CType **t;
962 unsigned long *a, *a1;
963 unsigned long stack;
964 int i;
966 #ifdef CONFIG_TCC_BCHECK
967 if (tcc_state->do_bounds_check)
968 gbound_args(nb_args);
969 #endif
971 return_type = &vtop[-nb_args].type.ref->type;
972 if ((return_type->t & VT_BTYPE) == VT_STRUCT)
973 --nb_args;
975 t = tcc_malloc((nb_args + 1) * sizeof(*t));
976 a = tcc_malloc((nb_args + 1) * sizeof(*a));
977 a1 = tcc_malloc((nb_args + 1) * sizeof(*a1));
979 t[0] = return_type;
980 for (i = 0; i < nb_args; i++)
981 t[nb_args - i] = &vtop[-i].type;
983 stack = arm64_pcs(nb_args, t, a);
985 // Allocate space for structs replaced by pointer:
986 for (i = nb_args; i; i--)
987 if (a[i] & 1) {
988 SValue *arg = &vtop[i - nb_args];
989 int align, size = type_size(&arg->type, &align);
990 assert((arg->type.t & VT_BTYPE) == VT_STRUCT);
991 stack = (stack + align - 1) & -align;
992 a1[i] = stack;
993 stack += size;
996 stack = (stack + 15) >> 4 << 4;
998 if (stack >= 0x1000000) // 16Mb
999 tcc_error("stack size too big %lu", stack);
1000 if (stack & 0xfff)
1001 o(0xd10003ff | (stack & 0xfff) << 10); // sub sp,sp,#(n)
1002 if (stack >> 12)
1003 o(0xd14003ff | (stack >> 12) << 10);
1005 // First pass: set all values on stack
1006 for (i = nb_args; i; i--) {
1007 vpushv(vtop - nb_args + i);
1009 if (a[i] & 1) {
1010 // struct replaced by pointer
1011 int r = get_reg(RC_INT);
1012 arm64_spoff(intr(r), a1[i]);
1013 vset(&vtop->type, r | VT_LVAL, 0);
1014 vswap();
1015 vstore();
1016 if (a[i] >= 32) {
1017 // pointer on stack
1018 r = get_reg(RC_INT);
1019 arm64_spoff(intr(r), a1[i]);
1020 arm64_strx(3, intr(r), 31, (a[i] - 32) >> 1 << 1);
1023 else if (a[i] >= 32) {
1024 // value on stack
1025 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
1026 int r = get_reg(RC_INT);
1027 arm64_spoff(intr(r), a[i] - 32);
1028 vset(&vtop->type, r | VT_LVAL, 0);
1029 vswap();
1030 vstore();
1032 else if (is_float(vtop->type.t)) {
1033 gv(RC_FLOAT);
1034 arm64_strv(arm64_type_size(vtop[0].type.t),
1035 fltr(vtop[0].r), 31, a[i] - 32);
1037 else {
1038 gv(RC_INT);
1039 arm64_strx(arm64_type_size(vtop[0].type.t),
1040 intr(vtop[0].r), 31, a[i] - 32);
1044 --vtop;
1047 // Second pass: assign values to registers
1048 for (i = nb_args; i; i--, vtop--) {
1049 if (a[i] < 16 && !(a[i] & 1)) {
1050 // value in general-purpose registers
1051 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
1052 int align, size = type_size(&vtop->type, &align);
1053 if (size) {
1054 vtop->type.t = VT_PTR;
1055 gaddrof();
1056 gv(RC_R(a[i] / 2));
1057 arm64_ldrs(a[i] / 2, size);
1060 else
1061 gv(RC_R(a[i] / 2));
1063 else if (a[i] < 16)
1064 // struct replaced by pointer in register
1065 arm64_spoff(a[i] / 2, a1[i]);
1066 else if (a[i] < 32) {
1067 // value in floating-point registers
1068 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
1069 uint32_t j, sz, n = arm64_hfa(&vtop->type, &sz);
1070 vtop->type.t = VT_PTR;
1071 gaddrof();
1072 gv(RC_R30);
1073 for (j = 0; j < n; j++)
1074 o(0x3d4003c0 |
1075 (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
1076 (a[i] / 2 - 8 + j) |
1077 j << 10); // ldr ([sdq])(*),[x30,#(j * sz)]
1079 else
1080 gv(RC_F(a[i] / 2 - 8));
1084 if ((return_type->t & VT_BTYPE) == VT_STRUCT) {
1085 if (a[0] == 1) {
1086 // indirect return: set x8 and discard the stack value
1087 gv(RC_R(8));
1088 --vtop;
1090 else
1091 // return in registers: keep the address for after the call
1092 vswap();
1095 save_regs(0);
1096 arm64_gen_bl_or_b(0);
1097 --vtop;
1098 if (stack & 0xfff)
1099 o(0x910003ff | (stack & 0xfff) << 10); // add sp,sp,#(n)
1100 if (stack >> 12)
1101 o(0x914003ff | (stack >> 12) << 10);
1104 int rt = return_type->t;
1105 int bt = rt & VT_BTYPE;
1106 if (bt == VT_STRUCT && !(a[0] & 1)) {
1107 // A struct was returned in registers, so write it out:
1108 gv(RC_R(8));
1109 --vtop;
1110 if (a[0] == 0) {
1111 int align, size = type_size(return_type, &align);
1112 assert(size <= 16);
1113 if (size > 8)
1114 o(0xa9000500); // stp x0,x1,[x8]
1115 else if (size)
1116 arm64_strx(size > 4 ? 3 : size > 2 ? 2 : size > 1, 0, 8, 0);
1119 else if (a[0] == 16) {
1120 uint32_t j, sz, n = arm64_hfa(return_type, &sz);
1121 for (j = 0; j < n; j++)
1122 o(0x3d000100 |
1123 (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
1124 (a[i] / 2 - 8 + j) |
1125 j << 10); // str ([sdq])(*),[x8,#(j * sz)]
1130 tcc_free(a1);
1131 tcc_free(a);
1132 tcc_free(t);
1135 static unsigned long arm64_func_va_list_stack;
1136 static int arm64_func_va_list_gr_offs;
1137 static int arm64_func_va_list_vr_offs;
1138 static int arm64_func_sub_sp_offset;
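/* Function prologue: push x29/x30 and reserve a 224-byte register-save
   area, spill q0-q7, x8 (the indirect-result register, hence func_vc = 144)
   and x0-x7 into it so that parameters and va_arg can find them, push the
   parameter symbols, set up x29, and leave two NOPs that gfunc_epilog later
   patches into the SP adjustment for the local frame. */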
1140 ST_FUNC void gfunc_prolog(Sym *func_sym)
1142 CType *func_type = &func_sym->type;
1143 int n = 0;
1144 int i = 0;
1145 Sym *sym;
1146 CType **t;
1147 unsigned long *a;
1149 func_vc = 144; // offset of where x8 is stored
1151 for (sym = func_type->ref; sym; sym = sym->next)
1152 ++n;
1153 t = n ? tcc_malloc(n * sizeof(*t)) : NULL;
1154 a = n ? tcc_malloc(n * sizeof(*a)) : NULL;
1156 for (sym = func_type->ref; sym; sym = sym->next)
1157 t[i++] = &sym->type;
1159 arm64_func_va_list_stack = arm64_pcs(n - 1, t, a);
1161 o(0xa9b27bfd); // stp x29,x30,[sp,#-224]!
1162 o(0xad0087e0); // stp q0,q1,[sp,#16]
1163 o(0xad018fe2); // stp q2,q3,[sp,#48]
1164 o(0xad0297e4); // stp q4,q5,[sp,#80]
1165 o(0xad039fe6); // stp q6,q7,[sp,#112]
1166 o(0xa90923e8); // stp x8,x8,[sp,#144]
1167 o(0xa90a07e0); // stp x0,x1,[sp,#160]
1168 o(0xa90b0fe2); // stp x2,x3,[sp,#176]
1169 o(0xa90c17e4); // stp x4,x5,[sp,#192]
1170 o(0xa90d1fe6); // stp x6,x7,[sp,#208]
1172 arm64_func_va_list_gr_offs = -64;
1173 arm64_func_va_list_vr_offs = -128;
1175 for (i = 1, sym = func_type->ref->next; sym; i++, sym = sym->next) {
1176 int off = (a[i] < 16 ? 160 + a[i] / 2 * 8 :
1177 a[i] < 32 ? 16 + (a[i] - 16) / 2 * 16 :
1178 224 + ((a[i] - 32) >> 1 << 1));
1179 sym_push(sym->v & ~SYM_FIELD, &sym->type,
1180 (a[i] & 1 ? VT_LLOCAL : VT_LOCAL) | VT_LVAL,
1181 off);
1183 if (a[i] < 16) {
1184 int align, size = type_size(&sym->type, &align);
1185 arm64_func_va_list_gr_offs = (a[i] / 2 - 7 +
1186 (!(a[i] & 1) && size > 8)) * 8;
1188 else if (a[i] < 32) {
1189 uint32_t hfa = arm64_hfa(&sym->type, 0);
1190 arm64_func_va_list_vr_offs = (a[i] / 2 - 16 +
1191 (hfa ? hfa : 1)) * 16;
1194 // HFAs of float and double need to be written differently:
1195 if (16 <= a[i] && a[i] < 32 && (sym->type.t & VT_BTYPE) == VT_STRUCT) {
1196 uint32_t j, sz, k = arm64_hfa(&sym->type, &sz);
1197 if (sz < 16)
1198 for (j = 0; j < k; j++) {
1199 o(0x3d0003e0 | -(sz & 8) << 27 | (sz & 4) << 29 |
1200 ((a[i] - 16) / 2 + j) | (off / sz + j) << 10);
1201 // str ([sdq])(*),[sp,#(j * sz)]
1206 tcc_free(a);
1207 tcc_free(t);
1209 o(0x910003fd); // mov x29,sp
1210 arm64_func_sub_sp_offset = ind;
1211 // In gfunc_epilog these will be replaced with code to decrement SP:
1212 o(0xd503201f); // nop
1213 o(0xd503201f); // nop
1214 loc = 0;
1215 #ifdef CONFIG_TCC_BCHECK
1216 if (tcc_state->do_bounds_check)
1217 gen_bounds_prolog();
1218 #endif
1221 ST_FUNC void gen_va_start(void)
1223 int r;
1224 --vtop; // we don't need the "arg"
1225 gaddrof();
1226 r = intr(gv(RC_INT));
1228 if (arm64_func_va_list_stack) {
1229 //xx could use add (immediate) here
1230 arm64_movimm(30, arm64_func_va_list_stack + 224);
1231 o(0x8b1e03be); // add x30,x29,x30
1233 else
1234 o(0x910383be); // add x30,x29,#224
1235 o(0xf900001e | r << 5); // str x30,[x(r)]
1237 if (arm64_func_va_list_gr_offs) {
1238 if (arm64_func_va_list_stack)
1239 o(0x910383be); // add x30,x29,#224
1240 o(0xf900041e | r << 5); // str x30,[x(r),#8]
1243 if (arm64_func_va_list_vr_offs) {
1244 o(0x910243be); // add x30,x29,#144
1245 o(0xf900081e | r << 5); // str x30,[x(r),#16]
1248 arm64_movimm(30, arm64_func_va_list_gr_offs);
1249 o(0xb900181e | r << 5); // str w30,[x(r),#24]
1251 arm64_movimm(30, arm64_func_va_list_vr_offs);
1252 o(0xb9001c1e | r << 5); // str w30,[x(r),#28]
1254 --vtop;
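/* Implement va_arg on the AAPCS64 va_list layout (__stack, __gr_top,
   __vr_top, __gr_offs, __vr_offs): bump the relevant offset, take the value
   from the register-save area while it still fits, otherwise from __stack.
   HFAs that arrived in several vector registers are repacked into a stack
   temporary so that they get their normal memory layout. */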
1257 ST_FUNC void gen_va_arg(CType *t)
1259 int align, size = type_size(t, &align);
1260 unsigned fsize, hfa = arm64_hfa(t, &fsize);
1261 uint32_t r0, r1;
1263 if (is_float(t->t)) {
1264 hfa = 1;
1265 fsize = size;
1268 gaddrof();
1269 r0 = intr(gv(RC_INT));
1270 r1 = get_reg(RC_INT);
1271 vtop[0].r = r1 | VT_LVAL;
1272 r1 = intr(r1);
1274 if (!hfa) {
1275 uint32_t n = size > 16 ? 8 : (size + 7) & -8;
1276 o(0xb940181e | r0 << 5); // ldr w30,[x(r0),#24] // __gr_offs
1277 if (align == 16) {
1278 assert(0); // this path untested but needed for __uint128_t
1279 o(0x11003fde); // add w30,w30,#15
1280 o(0x121c6fde); // and w30,w30,#-16
1282 o(0x310003c0 | r1 | n << 10); // adds w(r1),w30,#(n)
1283 o(0x540000ad); // b.le .+20
1284 o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
1285 o(0x9100001e | r1 << 5 | n << 10); // add x30,x(r1),#(n)
1286 o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
1287 o(0x14000004); // b .+16
1288 o(0xb9001800 | r1 | r0 << 5); // str w(r1),[x(r0),#24] // __gr_offs
1289 o(0xf9400400 | r1 | r0 << 5); // ldr x(r1),[x(r0),#8] // __gr_top
1290 o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
1291 if (size > 16)
1292 o(0xf9400000 | r1 | r1 << 5); // ldr x(r1),[x(r1)]
1294 else {
1295 uint32_t rsz = hfa << 4;
1296 uint32_t ssz = (size + 7) & -(uint32_t)8;
1297 uint32_t b1, b2;
1298 o(0xb9401c1e | r0 << 5); // ldr w30,[x(r0),#28] // __vr_offs
1299 o(0x310003c0 | r1 | rsz << 10); // adds w(r1),w30,#(rsz)
1300 b1 = ind; o(0x5400000d); // b.le lab1
1301 o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
1302 if (fsize == 16) {
1303 o(0x91003c00 | r1 | r1 << 5); // add x(r1),x(r1),#15
1304 o(0x927cec00 | r1 | r1 << 5); // and x(r1),x(r1),#-16
1306 o(0x9100001e | r1 << 5 | ssz << 10); // add x30,x(r1),#(ssz)
1307 o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
1308 b2 = ind; o(0x14000000); // b lab2
1309 // lab1:
1310 write32le(cur_text_section->data + b1, 0x5400000d | (ind - b1) << 3);
1311 o(0xb9001c00 | r1 | r0 << 5); // str w(r1),[x(r0),#28] // __vr_offs
1312 o(0xf9400800 | r1 | r0 << 5); // ldr x(r1),[x(r0),#16] // __vr_top
1313 if (hfa == 1 || fsize == 16)
1314 o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
1315 else {
1316 // We need to change the layout of this HFA.
1317 // Get some space on the stack using global variable "loc":
1318 loc = (loc - size) & -(uint32_t)align;
1319 o(0x8b3ec000 | 30 | r1 << 5); // add x30,x(r1),w30,sxtw
1320 arm64_movimm(r1, loc);
1321 o(0x8b0003a0 | r1 | r1 << 16); // add x(r1),x29,x(r1)
1322 o(0x4c402bdc | (uint32_t)fsize << 7 |
1323 (uint32_t)(hfa == 2) << 15 |
1324 (uint32_t)(hfa == 3) << 14); // ld1 {v28.(4s|2d),...},[x30]
1325 o(0x0d00801c | r1 << 5 | (fsize == 8) << 10 |
1326 (uint32_t)(hfa != 2) << 13 |
1327 (uint32_t)(hfa != 3) << 21); // st(hfa) {v28.(s|d),...}[0],[x(r1)]
1329 // lab2:
1330 write32le(cur_text_section->data + b2, 0x14000000 | (ind - b2) >> 2);
1334 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret,
1335 int *align, int *regsize)
1337 return 0;
1340 ST_FUNC void gfunc_return(CType *func_type)
1342 CType *t = func_type;
1343 unsigned long a;
1345 arm64_pcs(0, &t, &a);
1346 switch (a) {
1347 case -1:
1348 break;
1349 case 0:
1350 if ((func_type->t & VT_BTYPE) == VT_STRUCT) {
1351 int align, size = type_size(func_type, &align);
1352 gaddrof();
1353 gv(RC_R(0));
1354 arm64_ldrs(0, size);
1356 else
1357 gv(RC_IRET);
1358 break;
1359 case 1: {
1360 CType type = *func_type;
1361 mk_pointer(&type);
1362 vset(&type, VT_LOCAL | VT_LVAL, func_vc);
1363 indir();
1364 vswap();
1365 vstore();
1366 break;
1368 case 16:
1369 if ((func_type->t & VT_BTYPE) == VT_STRUCT) {
1370 uint32_t j, sz, n = arm64_hfa(&vtop->type, &sz);
1371 gaddrof();
1372 gv(RC_R(0));
1373 for (j = 0; j < n; j++)
1374 o(0x3d400000 |
1375 (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
1376 j | j << 10); // ldr ([sdq])(*),[x0,#(j * sz)]
1378 else
1379 gv(RC_FRET);
1380 break;
1381 default:
1382 assert(0);
1384 vtop--;
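/* Function epilogue: patch the two NOPs left by gfunc_prolog with the SP
   decrement for the now-known frame size (one or two SUB immediates, or a
   MOV into x16 plus SUB for very large frames), then restore SP from x29,
   pop x29/x30 and return. */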
1387 ST_FUNC void gfunc_epilog(void)
1389 #ifdef CONFIG_TCC_BCHECK
1390 if (tcc_state->do_bounds_check)
1391 gen_bounds_epilog();
1392 #endif
1394 if (loc) {
1395 // Insert instructions to subtract size of stack frame from SP.
1396 unsigned char *ptr = cur_text_section->data + arm64_func_sub_sp_offset;
1397 uint64_t diff = (-loc + 15) & ~15;
1398 if (!(diff >> 24)) {
1399 if (diff & 0xfff) // sub sp,sp,#(diff & 0xfff)
1400 write32le(ptr, 0xd10003ff | (diff & 0xfff) << 10);
1401 if (diff >> 12) // sub sp,sp,#(diff >> 12),lsl #12
1402 write32le(ptr + 4, 0xd14003ff | (diff >> 12) << 10);
1404 else {
1405 // In this case we may subtract more than necessary,
1406 // but always less than 17/16 of what we were aiming for.
1407 int i = 0;
1408 int j = 0;
1409 while (diff >> 20) {
1410 diff = (diff + 0xffff) >> 16;
1411 ++i;
1413 while (diff >> 16) {
1414 diff = (diff + 1) >> 1;
1415 ++j;
1417 write32le(ptr, 0xd2800010 | diff << 5 | i << 21);
1418 // mov x16,#(diff),lsl #(16 * i)
1419 write32le(ptr + 4, 0xcb3063ff | j << 10);
1420 // sub sp,sp,x16,lsl #(j)
1423 o(0x910003bf); // mov sp,x29
1424 o(0xa8ce7bfd); // ldp x29,x30,[sp],#224
1426 o(0xd65f03c0); // ret
1429 ST_FUNC void gen_fill_nops(int bytes)
1431 if ((bytes & 3))
1432 tcc_error("alignment of code section not multiple of 4");
1433 while (bytes > 0) {
1434 o(0xd503201f); // nop
1435 bytes -= 4;
1439 // Generate forward branch to label:
1440 ST_FUNC int gjmp(int t)
1442 int r = ind;
1443 if (nocode_wanted)
1444 return t;
1445 o(t);
1446 return r;
1449 // Generate branch to known address:
1450 ST_FUNC void gjmp_addr(int a)
1452 assert(a - ind + 0x8000000 < 0x10000000);
1453 o(0x14000000 | ((a - ind) >> 2 & 0x3ffffff));
1456 ST_FUNC int gjmp_append(int n, int t)
1458 void *p;
1459 /* insert vtop->c jump list in t */
1460 if (n) {
1461 uint32_t n1 = n, n2;
1462 while ((n2 = read32le(p = cur_text_section->data + n1)))
1463 n1 = n2;
1464 write32le(p, t);
1465 t = n;
1467 return t;
1470 void arm64_vset_VT_CMP(int op)
1472 if (op >= TOK_ULT && op <= TOK_GT) {
1473 vtop->cmp_r = vtop->r;
1474 vset_VT_CMP(0x80);
1478 static void arm64_gen_opil(int op, uint32_t l);
1480 static void arm64_load_cmp(int r, SValue *sv)
1482 sv->r = sv->cmp_r;
1483 if (sv->c.i & 1) {
1484 vpushi(1);
1485 arm64_gen_opil('^', 0);
1487 if (r != sv->r) {
1488 load(r, sv);
1489 sv->r = r;
1493 ST_FUNC int gjmp_cond(int op, int t)
1495 int bt = vtop->type.t & VT_BTYPE;
1497 int inv = op & 1;
1498 vtop->r = vtop->cmp_r;
1500 if (bt == VT_LDOUBLE) {
1501 uint32_t a, b, f = fltr(gv(RC_FLOAT));
1502 a = get_reg(RC_INT);
1503 vpushi(0);
1504 vtop[0].r = a;
1505 b = get_reg(RC_INT);
1506 a = intr(a);
1507 b = intr(b);
1508 o(0x4e083c00 | a | f << 5); // mov x(a),v(f).d[0]
1509 o(0x4e183c00 | b | f << 5); // mov x(b),v(f).d[1]
1510 o(0xaa000400 | a | a << 5 | b << 16); // orr x(a),x(a),x(b),lsl #1
1511 o(0xb4000040 | a | !!inv << 24); // cbz/cbnz x(a),.+8
1512 --vtop;
1514 else if (bt == VT_FLOAT || bt == VT_DOUBLE) {
1515 uint32_t a = fltr(gv(RC_FLOAT));
1516 o(0x1e202008 | a << 5 | (bt != VT_FLOAT) << 22); // fcmp
1517 o(0x54000040 | !!inv); // b.eq/b.ne .+8
1519 else {
1520 uint32_t ll = (bt == VT_PTR || bt == VT_LLONG);
1521 uint32_t a = intr(gv(RC_INT));
1522 o(0x34000040 | a | !!inv << 24 | ll << 31); // cbz/cbnz wA,.+8
1524 return gjmp(t);
1527 static int arm64_iconst(uint64_t *val, SValue *sv)
1529 if ((sv->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
1530 return 0;
1531 if (val) {
1532 int t = sv->type.t;
1533 int bt = t & VT_BTYPE;
1534 *val = ((bt == VT_LLONG || bt == VT_PTR) ? sv->c.i :
1535 (uint32_t)sv->c.i |
1536 (t & VT_UNSIGNED ? 0 : -(sv->c.i & 0x80000000)));
1538 return 1;
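/* Try to emit an integer operation whose second operand is the constant
   'val' without loading it into a register: ADD/SUB with 12-bit (optionally
   shifted) immediates, AND/ORR/EOR with bitmask immediates, and shifts by a
   constant amount. 'l' selects 64-bit, 'x' is the destination, 'a' the
   register operand, and 'rev' is set when the constant was the left operand.
   Returns 1 on success, 0 to fall back to the register/register path. */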
1541 static int arm64_gen_opic(int op, uint32_t l, int rev, uint64_t val,
1542 uint32_t x, uint32_t a)
1544 if (op == '-' && !rev) {
1545 val = -val;
1546 op = '+';
1548 val = l ? val : (uint32_t)val;
1550 switch (op) {
1552 case '+': {
1553 uint32_t s = l ? val >> 63 : val >> 31;
1554 val = s ? -val : val;
1555 val = l ? val : (uint32_t)val;
1556 if (!(val & ~(uint64_t)0xfff))
1557 o(0x11000000 | l << 31 | s << 30 | x | a << 5 | val << 10);
1558 else if (!(val & ~(uint64_t)0xfff000))
1559 o(0x11400000 | l << 31 | s << 30 | x | a << 5 | val >> 12 << 10);
1560 else {
1561 arm64_movimm(30, val); // use x30
1562 o(0x0b1e0000 | l << 31 | s << 30 | x | a << 5);
1564 return 1;
1567 case '-':
1568 if (!val)
1569 o(0x4b0003e0 | l << 31 | x | a << 16); // neg
1570 else if (val == (l ? (uint64_t)-1 : (uint32_t)-1))
1571 o(0x2a2003e0 | l << 31 | x | a << 16); // mvn
1572 else {
1573 arm64_movimm(30, val); // use x30
1574 o(0x4b0003c0 | l << 31 | x | a << 16); // sub
1576 return 1;
1578 case '^':
1579 if (val == -1 || (val == 0xffffffff && !l)) {
1580 o(0x2a2003e0 | l << 31 | x | a << 16); // mvn
1581 return 1;
1583 // fall through
1584 case '&':
1585 case '|': {
1586 int e = arm64_encode_bimm64(l ? val : val | val << 32);
1587 if (e < 0)
1588 return 0;
1589 o((op == '&' ? 0x12000000 :
1590 op == '|' ? 0x32000000 : 0x52000000) |
1591 l << 31 | x | a << 5 | (uint32_t)e << 10);
1592 return 1;
1595 case TOK_SAR:
1596 case TOK_SHL:
1597 case TOK_SHR: {
1598 uint32_t n = 32 << l;
1599 val = val & (n - 1);
1600 if (rev)
1601 return 0;
1602 if (!val) {
1603 // tcc_warning("shift count >= width of type");
1604 o(0x2a0003e0 | l << 31 | a << 16 | x); // mov (w|x)(x),(w|x)(a)
1605 return 1;
1607 else if (op == TOK_SHL)
1608 o(0x53000000 | l << 31 | l << 22 | x | a << 5 |
1609 (n - val) << 16 | (n - 1 - val) << 10); // lsl
1610 else
1611 o(0x13000000 | (op == TOK_SHR) << 30 | l << 31 | l << 22 |
1612 x | a << 5 | val << 16 | (n - 1) << 10); // lsr/asr
1613 return 1;
1617 return 0;
1620 static void arm64_gen_opil(int op, uint32_t l)
1622 uint32_t x, a, b;
1624 // Special treatment for operations with a constant operand:
1626 uint64_t val;
1627 int rev = 1;
1629 if (arm64_iconst(0, &vtop[0])) {
1630 vswap();
1631 rev = 0;
1633 if (arm64_iconst(&val, &vtop[-1])) {
1634 gv(RC_INT);
1635 a = intr(vtop[0].r);
1636 --vtop;
1637 x = get_reg(RC_INT);
1638 ++vtop;
1639 if (arm64_gen_opic(op, l, rev, val, intr(x), a)) {
1640 vtop[0].r = x;
1641 vswap();
1642 --vtop;
1643 return;
1646 if (!rev)
1647 vswap();
1650 gv2(RC_INT, RC_INT);
1651 assert(vtop[-1].r < VT_CONST && vtop[0].r < VT_CONST);
1652 a = intr(vtop[-1].r);
1653 b = intr(vtop[0].r);
1654 vtop -= 2;
1655 x = get_reg(RC_INT);
1656 ++vtop;
1657 vtop[0].r = x;
1658 x = intr(x);
1660 switch (op) {
1661 case '%':
1662 // Use x30 for quotient:
1663 o(0x1ac00c00 | l << 31 | 30 | a << 5 | b << 16); // sdiv
1664 o(0x1b008000 | l << 31 | x | (uint32_t)30 << 5 |
1665 b << 16 | a << 10); // msub
1666 break;
1667 case '&':
1668 o(0x0a000000 | l << 31 | x | a << 5 | b << 16); // and
1669 break;
1670 case '*':
1671 o(0x1b007c00 | l << 31 | x | a << 5 | b << 16); // mul
1672 break;
1673 case '+':
1674 o(0x0b000000 | l << 31 | x | a << 5 | b << 16); // add
1675 break;
1676 case '-':
1677 o(0x4b000000 | l << 31 | x | a << 5 | b << 16); // sub
1678 break;
1679 case '/':
1680 o(0x1ac00c00 | l << 31 | x | a << 5 | b << 16); // sdiv
1681 break;
1682 case '^':
1683 o(0x4a000000 | l << 31 | x | a << 5 | b << 16); // eor
1684 break;
1685 case '|':
1686 o(0x2a000000 | l << 31 | x | a << 5 | b << 16); // orr
1687 break;
1688 case TOK_EQ:
1689 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1690 o(0x1a9f17e0 | x); // cset wA,eq
1691 break;
1692 case TOK_GE:
1693 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1694 o(0x1a9fb7e0 | x); // cset wA,ge
1695 break;
1696 case TOK_GT:
1697 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1698 o(0x1a9fd7e0 | x); // cset wA,gt
1699 break;
1700 case TOK_LE:
1701 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1702 o(0x1a9fc7e0 | x); // cset wA,le
1703 break;
1704 case TOK_LT:
1705 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1706 o(0x1a9fa7e0 | x); // cset wA,lt
1707 break;
1708 case TOK_NE:
1709 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1710 o(0x1a9f07e0 | x); // cset wA,ne
1711 break;
1712 case TOK_SAR:
1713 o(0x1ac02800 | l << 31 | x | a << 5 | b << 16); // asr
1714 break;
1715 case TOK_SHL:
1716 o(0x1ac02000 | l << 31 | x | a << 5 | b << 16); // lsl
1717 break;
1718 case TOK_SHR:
1719 o(0x1ac02400 | l << 31 | x | a << 5 | b << 16); // lsr
1720 break;
1721 case TOK_UDIV:
1722 case TOK_PDIV:
1723 o(0x1ac00800 | l << 31 | x | a << 5 | b << 16); // udiv
1724 break;
1725 case TOK_UGE:
1726 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1727 o(0x1a9f37e0 | x); // cset wA,cs
1728 break;
1729 case TOK_UGT:
1730 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1731 o(0x1a9f97e0 | x); // cset wA,hi
1732 break;
1733 case TOK_ULT:
1734 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1735 o(0x1a9f27e0 | x); // cset wA,cc
1736 break;
1737 case TOK_ULE:
1738 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1739 o(0x1a9f87e0 | x); // cset wA,ls
1740 break;
1741 case TOK_UMOD:
1742 // Use x30 for quotient:
1743 o(0x1ac00800 | l << 31 | 30 | a << 5 | b << 16); // udiv
1744 o(0x1b008000 | l << 31 | x | (uint32_t)30 << 5 |
1745 b << 16 | a << 10); // msub
1746 break;
1747 default:
1748 assert(0);
1752 ST_FUNC void gen_opi(int op)
1754 arm64_gen_opil(op, 0);
1755 arm64_vset_VT_CMP(op);
1758 ST_FUNC void gen_opl(int op)
1760 arm64_gen_opil(op, 1);
1761 arm64_vset_VT_CMP(op);
1764 ST_FUNC void gen_opf(int op)
1766 uint32_t x, a, b, dbl;
1768 if (vtop[0].type.t == VT_LDOUBLE) {
1769 CType type = vtop[0].type;
1770 int func = 0;
1771 int cond = -1;
1772 switch (op) {
1773 case '*': func = TOK___multf3; break;
1774 case '+': func = TOK___addtf3; break;
1775 case '-': func = TOK___subtf3; break;
1776 case '/': func = TOK___divtf3; break;
1777 case TOK_EQ: func = TOK___eqtf2; cond = 1; break;
1778 case TOK_NE: func = TOK___netf2; cond = 0; break;
1779 case TOK_LT: func = TOK___lttf2; cond = 10; break;
1780 case TOK_GE: func = TOK___getf2; cond = 11; break;
1781 case TOK_LE: func = TOK___letf2; cond = 12; break;
1782 case TOK_GT: func = TOK___gttf2; cond = 13; break;
1783 default: assert(0); break;
1785 vpush_helper_func(func);
1786 vrott(3);
1787 gfunc_call(2);
1788 vpushi(0);
1789 vtop->r = cond < 0 ? REG_FRET : REG_IRET;
1790 if (cond < 0)
1791 vtop->type = type;
1792 else {
1793 o(0x7100001f); // cmp w0,#0
1794 o(0x1a9f07e0 | (uint32_t)cond << 12); // cset w0,(cond)
1796 return;
1799 dbl = vtop[0].type.t != VT_FLOAT;
1800 gv2(RC_FLOAT, RC_FLOAT);
1801 assert(vtop[-1].r < VT_CONST && vtop[0].r < VT_CONST);
1802 a = fltr(vtop[-1].r);
1803 b = fltr(vtop[0].r);
1804 vtop -= 2;
1805 switch (op) {
1806 case TOK_EQ: case TOK_NE:
1807 case TOK_LT: case TOK_GE: case TOK_LE: case TOK_GT:
1808 x = get_reg(RC_INT);
1809 ++vtop;
1810 vtop[0].r = x;
1811 x = intr(x);
1812 break;
1813 default:
1814 x = get_reg(RC_FLOAT);
1815 ++vtop;
1816 vtop[0].r = x;
1817 x = fltr(x);
1818 break;
1821 switch (op) {
1822 case '*':
1823 o(0x1e200800 | dbl << 22 | x | a << 5 | b << 16); // fmul
1824 break;
1825 case '+':
1826 o(0x1e202800 | dbl << 22 | x | a << 5 | b << 16); // fadd
1827 break;
1828 case '-':
1829 o(0x1e203800 | dbl << 22 | x | a << 5 | b << 16); // fsub
1830 break;
1831 case '/':
1832 o(0x1e201800 | dbl << 22 | x | a << 5 | b << 16); // fdiv
1833 break;
1834 case TOK_EQ:
1835 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1836 o(0x1a9f17e0 | x); // cset w(x),eq
1837 break;
1838 case TOK_GE:
1839 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1840 o(0x1a9fb7e0 | x); // cset w(x),ge
1841 break;
1842 case TOK_GT:
1843 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1844 o(0x1a9fd7e0 | x); // cset w(x),gt
1845 break;
1846 case TOK_LE:
1847 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1848 o(0x1a9f87e0 | x); // cset w(x),ls
1849 break;
1850 case TOK_LT:
1851 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1852 o(0x1a9f57e0 | x); // cset w(x),mi
1853 break;
1854 case TOK_NE:
1855 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1856 o(0x1a9f07e0 | x); // cset w(x),ne
1857 break;
1858 default:
1859 assert(0);
1861 arm64_vset_VT_CMP(op);
1864 // Generate sign extension from 32 to 64 bits:
1865 ST_FUNC void gen_cvt_sxtw(void)
1867 uint32_t r = intr(gv(RC_INT));
1868 o(0x93407c00 | r | r << 5); // sxtw x(r),w(r)
1871 /* char/short to int conversion */
1872 ST_FUNC void gen_cvt_csti(int t)
1874 int r = intr(gv(RC_INT));
1875 o(0x13001c00
1876 | ((t & VT_BTYPE) == VT_SHORT) << 13
1877 | (uint32_t)!!(t & VT_UNSIGNED) << 30
1878 | r | r << 5); // [su]xt[bh] w(r),w(r)
1881 ST_FUNC void gen_cvt_itof(int t)
1883 if (t == VT_LDOUBLE) {
1884 int f = vtop->type.t;
1885 int func = (f & VT_BTYPE) == VT_LLONG ?
1886 (f & VT_UNSIGNED ? TOK___floatunditf : TOK___floatditf) :
1887 (f & VT_UNSIGNED ? TOK___floatunsitf : TOK___floatsitf);
1888 vpush_helper_func(func);
1889 vrott(2);
1890 gfunc_call(1);
1891 vpushi(0);
1892 vtop->type.t = t;
1893 vtop->r = REG_FRET;
1894 return;
1896 else {
1897 int d, n = intr(gv(RC_INT));
1898 int s = !(vtop->type.t & VT_UNSIGNED);
1899 uint32_t l = ((vtop->type.t & VT_BTYPE) == VT_LLONG);
1900 --vtop;
1901 d = get_reg(RC_FLOAT);
1902 ++vtop;
1903 vtop[0].r = d;
1904 o(0x1e220000 | (uint32_t)!s << 16 |
1905 (uint32_t)(t != VT_FLOAT) << 22 | fltr(d) |
1906 l << 31 | n << 5); // [us]cvtf [sd](d),[wx](n)
1910 ST_FUNC void gen_cvt_ftoi(int t)
1912 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
1913 int func = (t & VT_BTYPE) == VT_LLONG ?
1914 (t & VT_UNSIGNED ? TOK___fixunstfdi : TOK___fixtfdi) :
1915 (t & VT_UNSIGNED ? TOK___fixunstfsi : TOK___fixtfsi);
1916 vpush_helper_func(func);
1917 vrott(2);
1918 gfunc_call(1);
1919 vpushi(0);
1920 vtop->type.t = t;
1921 vtop->r = REG_IRET;
1922 return;
1924 else {
1925 int d, n = fltr(gv(RC_FLOAT));
1926 uint32_t l = ((vtop->type.t & VT_BTYPE) != VT_FLOAT);
1927 --vtop;
1928 d = get_reg(RC_INT);
1929 ++vtop;
1930 vtop[0].r = d;
1931 o(0x1e380000 |
1932 (uint32_t)!!(t & VT_UNSIGNED) << 16 |
1933 (uint32_t)((t & VT_BTYPE) == VT_LLONG) << 31 | intr(d) |
1934 l << 22 | n << 5); // fcvtz[su] [wx](d),[sd](n)
1938 ST_FUNC void gen_cvt_ftof(int t)
1940 int f = vtop[0].type.t & VT_BTYPE;
1941 assert(t == VT_FLOAT || t == VT_DOUBLE || t == VT_LDOUBLE);
1942 assert(f == VT_FLOAT || f == VT_DOUBLE || f == VT_LDOUBLE);
1943 if (t == f)
1944 return;
1946 if (t == VT_LDOUBLE || f == VT_LDOUBLE) {
1947 int func = (t == VT_LDOUBLE) ?
1948 (f == VT_FLOAT ? TOK___extendsftf2 : TOK___extenddftf2) :
1949 (t == VT_FLOAT ? TOK___trunctfsf2 : TOK___trunctfdf2);
1950 vpush_helper_func(func);
1951 vrott(2);
1952 gfunc_call(1);
1953 vpushi(0);
1954 vtop->type.t = t;
1955 vtop->r = REG_FRET;
1957 else {
1958 int x, a;
1959 gv(RC_FLOAT);
1960 assert(vtop[0].r < VT_CONST);
1961 a = fltr(vtop[0].r);
1962 --vtop;
1963 x = get_reg(RC_FLOAT);
1964 ++vtop;
1965 vtop[0].r = x;
1966 x = fltr(x);
1968 if (f == VT_FLOAT)
1969 o(0x1e22c000 | x | a << 5); // fcvt d(x),s(a)
1970 else
1971 o(0x1e624000 | x | a << 5); // fcvt s(x),d(a)
1975 ST_FUNC void ggoto(void)
1977 arm64_gen_bl_or_b(1);
1978 --vtop;
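/* Inline __clear_cache(beg, end): read CTR_EL0 for the data and instruction
   cache line sizes, clean the data cache over [beg,end) with DC CVAU, then
   invalidate the instruction cache with IC IVAU, with DSB/ISB barriers in
   between. The x0-x4 comments below reflect the registers the preceding gv2
   and get_reg calls are expected to hand out. */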
1981 ST_FUNC void gen_clear_cache(void)
1983 uint32_t beg, end, dsz, isz, p, lab1, b1;
1984 gv2(RC_INT, RC_INT);
1985 vpushi(0);
1986 vtop->r = get_reg(RC_INT);
1987 vpushi(0);
1988 vtop->r = get_reg(RC_INT);
1989 vpushi(0);
1990 vtop->r = get_reg(RC_INT);
1991 beg = intr(vtop[-4].r); // x0
1992 end = intr(vtop[-3].r); // x1
1993 dsz = intr(vtop[-2].r); // x2
1994 isz = intr(vtop[-1].r); // x3
1995 p = intr(vtop[0].r); // x4
1996 vtop -= 5;
1998 o(0xd53b0020 | isz); // mrs x(isz),ctr_el0
1999 o(0x52800080 | p); // mov w(p),#4
2000 o(0x53104c00 | dsz | isz << 5); // ubfx w(dsz),w(isz),#16,#4
2001 o(0x1ac02000 | dsz | p << 5 | dsz << 16); // lsl w(dsz),w(p),w(dsz)
2002 o(0x12000c00 | isz | isz << 5); // and w(isz),w(isz),#15
2003 o(0x1ac02000 | isz | p << 5 | isz << 16); // lsl w(isz),w(p),w(isz)
2004 o(0x51000400 | p | dsz << 5); // sub w(p),w(dsz),#1
2005 o(0x8a240004 | p | beg << 5 | p << 16); // bic x(p),x(beg),x(p)
2006 b1 = ind; o(0x14000000); // b
2007 lab1 = ind;
2008 o(0xd50b7b20 | p); // dc cvau,x(p)
2009 o(0x8b000000 | p | p << 5 | dsz << 16); // add x(p),x(p),x(dsz)
2010 write32le(cur_text_section->data + b1, 0x14000000 | (ind - b1) >> 2);
2011 o(0xeb00001f | p << 5 | end << 16); // cmp x(p),x(end)
2012 o(0x54ffffa3 | ((lab1 - ind) << 3 & 0xffffe0)); // b.cc lab1
2013 o(0xd5033b9f); // dsb ish
2014 o(0x51000400 | p | isz << 5); // sub w(p),w(isz),#1
2015 o(0x8a240004 | p | beg << 5 | p << 16); // bic x(p),x(beg),x(p)
2016 b1 = ind; o(0x14000000); // b
2017 lab1 = ind;
2018 o(0xd50b7520 | p); // ic ivau,x(p)
2019 o(0x8b000000 | p | p << 5 | isz << 16); // add x(p),x(p),x(isz)
2020 write32le(cur_text_section->data + b1, 0x14000000 | (ind - b1) >> 2);
2021 o(0xeb00001f | p << 5 | end << 16); // cmp x(p),x(end)
2022 o(0x54ffffa3 | ((lab1 - ind) << 3 & 0xffffe0)); // b.cc lab1
2023 o(0xd5033b9f); // dsb ish
2024 o(0xd5033fdf); // isb
2027 ST_FUNC void gen_vla_sp_save(int addr) {
2028 uint32_t r = intr(get_reg(RC_INT));
2029 o(0x910003e0 | r); // mov x(r),sp
2030 arm64_strx(3, r, 29, addr);
2033 ST_FUNC void gen_vla_sp_restore(int addr) {
2034 // Use x30 because this function can be called when there
2035 // is a live return value in x0 but there is nothing on
2036 // the value stack to prevent get_reg from returning x0.
2037 uint32_t r = 30;
2038 arm64_ldrx(0, 3, r, 29, addr);
2039 o(0x9100001f | r << 5); // mov sp,x(r)
2042 ST_FUNC void gen_vla_alloc(CType *type, int align) {
2043 uint32_t r;
2044 #if defined(CONFIG_TCC_BCHECK)
2045 if (tcc_state->do_bounds_check)
2046 vpushv(vtop);
2047 #endif
2048 r = intr(gv(RC_INT));
2049 #if defined(CONFIG_TCC_BCHECK)
2050 if (tcc_state->do_bounds_check)
2051 o(0x91004000 | r | r << 5); // add x(r),x(r),#15+1
2052 else
2053 #endif
2054 o(0x91003c00 | r | r << 5); // add x(r),x(r),#15
2055 o(0x927cec00 | r | r << 5); // bic x(r),x(r),#15
2056 o(0xcb2063ff | r << 16); // sub sp,sp,x(r)
2057 vpop();
2058 #if defined(CONFIG_TCC_BCHECK)
2059 if (tcc_state->do_bounds_check) {
2060 vpushi(0);
2061 vtop->r = TREG_R(0);
2062 o(0x910003e0 | vtop->r); // mov r0,sp
2063 vswap();
2064 vpush_helper_func(TOK___bound_new_region);
2065 vrott(3);
2066 gfunc_call(2);
2067 func_bound_add_epilog = 1;
2069 #endif
2072 /* end of A64 code generator */
2073 /*************************************************************/
2074 #endif
2075 /*************************************************************/