Implement proper floating point negation
[tinycc.git] / arm64-gen.c
1 /*
2 * A64 code generator for TCC
4 * Copyright (c) 2014-2015 Edmund Grimley Evans
6 * Copying and distribution of this file, with or without modification,
7 * are permitted in any medium without royalty provided the copyright
8 * notice and this notice are preserved. This file is offered as-is,
9 * without any warranty.
12 #ifdef TARGET_DEFS_ONLY
14 // Number of registers available to allocator:
15 #define NB_REGS 28 // x0-x18, x30, v0-v7
17 #define TREG_R(x) (x) // x = 0..18
18 #define TREG_R30 19
19 #define TREG_F(x) (x + 20) // x = 0..7
21 // Register classes sorted from more general to more precise:
22 #define RC_INT (1 << 0)
23 #define RC_FLOAT (1 << 1)
24 #define RC_R(x) (1 << (2 + (x))) // x = 0..18
25 #define RC_R30 (1 << 21)
26 #define RC_F(x) (1 << (22 + (x))) // x = 0..7
28 #define RC_IRET (RC_R(0)) // int return register class
29 #define RC_FRET (RC_F(0)) // float return register class
31 #define REG_IRET (TREG_R(0)) // int return register number
32 #define REG_FRET (TREG_F(0)) // float return register number
34 #define PTR_SIZE 8
36 #define LDOUBLE_SIZE 16
37 #define LDOUBLE_ALIGN 16
39 #define MAX_ALIGN 16
41 #define CHAR_IS_UNSIGNED
43 /* define if return values need to be extended explicitly
44 at caller side (for interfacing with non-TCC compilers) */
45 #define PROMOTE_RET
46 /******************************************************/
47 #else /* ! TARGET_DEFS_ONLY */
48 /******************************************************/
49 #define USING_GLOBALS
50 #include "tcc.h"
51 #include <assert.h>
53 ST_DATA const char *target_machine_defs =
54 "__aarch64__\0"
57 ST_DATA const int reg_classes[NB_REGS] = {
58 RC_INT | RC_R(0),
59 RC_INT | RC_R(1),
60 RC_INT | RC_R(2),
61 RC_INT | RC_R(3),
62 RC_INT | RC_R(4),
63 RC_INT | RC_R(5),
64 RC_INT | RC_R(6),
65 RC_INT | RC_R(7),
66 RC_INT | RC_R(8),
67 RC_INT | RC_R(9),
68 RC_INT | RC_R(10),
69 RC_INT | RC_R(11),
70 RC_INT | RC_R(12),
71 RC_INT | RC_R(13),
72 RC_INT | RC_R(14),
73 RC_INT | RC_R(15),
74 RC_INT | RC_R(16),
75 RC_INT | RC_R(17),
76 RC_INT | RC_R(18),
77 RC_R30, // not in RC_INT as we make special use of x30
78 RC_FLOAT | RC_F(0),
79 RC_FLOAT | RC_F(1),
80 RC_FLOAT | RC_F(2),
81 RC_FLOAT | RC_F(3),
82 RC_FLOAT | RC_F(4),
83 RC_FLOAT | RC_F(5),
84 RC_FLOAT | RC_F(6),
85 RC_FLOAT | RC_F(7)
88 #if defined(CONFIG_TCC_BCHECK)
89 static addr_t func_bound_offset;
90 static unsigned long func_bound_ind;
91 ST_DATA int func_bound_add_epilog;
92 #endif
94 #define IS_FREG(x) ((x) >= TREG_F(0))
96 static uint32_t intr(int r)
98 assert(TREG_R(0) <= r && r <= TREG_R30);
99 return r < TREG_R30 ? r : 30;
102 static uint32_t fltr(int r)
104 assert(TREG_F(0) <= r && r <= TREG_F(7));
105 return r - TREG_F(0);
108 // Add an instruction to text section:
109 ST_FUNC void o(unsigned int c)
111 int ind1 = ind + 4;
112 if (nocode_wanted)
113 return;
114 if (ind1 > cur_text_section->data_allocated)
115 section_realloc(cur_text_section, ind1);
116 write32le(cur_text_section->data + ind, c);
117 ind = ind1;
120 static int arm64_encode_bimm64(uint64_t x)
122 int neg = x & 1;
123 int rep, pos, len;
125 if (neg)
126 x = ~x;
127 if (!x)
128 return -1;
130 if (x >> 2 == (x & (((uint64_t)1 << (64 - 2)) - 1)))
131 rep = 2, x &= ((uint64_t)1 << 2) - 1;
132 else if (x >> 4 == (x & (((uint64_t)1 << (64 - 4)) - 1)))
133 rep = 4, x &= ((uint64_t)1 << 4) - 1;
134 else if (x >> 8 == (x & (((uint64_t)1 << (64 - 8)) - 1)))
135 rep = 8, x &= ((uint64_t)1 << 8) - 1;
136 else if (x >> 16 == (x & (((uint64_t)1 << (64 - 16)) - 1)))
137 rep = 16, x &= ((uint64_t)1 << 16) - 1;
138 else if (x >> 32 == (x & (((uint64_t)1 << (64 - 32)) - 1)))
139 rep = 32, x &= ((uint64_t)1 << 32) - 1;
140 else
141 rep = 64;
143 pos = 0;
144 if (!(x & (((uint64_t)1 << 32) - 1))) x >>= 32, pos += 32;
145 if (!(x & (((uint64_t)1 << 16) - 1))) x >>= 16, pos += 16;
146 if (!(x & (((uint64_t)1 << 8) - 1))) x >>= 8, pos += 8;
147 if (!(x & (((uint64_t)1 << 4) - 1))) x >>= 4, pos += 4;
148 if (!(x & (((uint64_t)1 << 2) - 1))) x >>= 2, pos += 2;
149 if (!(x & (((uint64_t)1 << 1) - 1))) x >>= 1, pos += 1;
151 len = 0;
152 if (!(~x & (((uint64_t)1 << 32) - 1))) x >>= 32, len += 32;
153 if (!(~x & (((uint64_t)1 << 16) - 1))) x >>= 16, len += 16;
154 if (!(~x & (((uint64_t)1 << 8) - 1))) x >>= 8, len += 8;
155 if (!(~x & (((uint64_t)1 << 4) - 1))) x >>= 4, len += 4;
156 if (!(~x & (((uint64_t)1 << 2) - 1))) x >>= 2, len += 2;
157 if (!(~x & (((uint64_t)1 << 1) - 1))) x >>= 1, len += 1;
159 if (x)
160 return -1;
161 if (neg) {
162 pos = (pos + len) & (rep - 1);
163 len = rep - len;
165 return ((0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) |
166 ((rep - pos) & (rep - 1)) << 6 | (len - 1));
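/* arm64_encode_bimm64() computes the 13-bit N:immr:imms field used by the
 * logical (bitmask) immediate instructions: a value is encodable iff it is a
 * rotated run of consecutive ones replicated across the register in 2-, 4-,
 * 8-, 16-, 32- or 64-bit elements; -1 means the value cannot be encoded.
 * Illustrative example (worked out by hand, not taken from the source):
 * arm64_encode_bimm64(0x0000ffff0000ffff) yields 15, i.e. N=0, immr=0,
 * imms=0b001111, since each 32-bit element is 16 ones starting at bit 0. */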
169 static uint32_t arm64_movi(int r, uint64_t x)
171 uint64_t m = 0xffff;
172 int e;
173 if (!(x & ~m))
174 return 0x52800000 | r | x << 5; // movz w(r),#(x)
175 if (!(x & ~(m << 16)))
176 return 0x52a00000 | r | x >> 11; // movz w(r),#(x >> 16),lsl #16
177 if (!(x & ~(m << 32)))
178 return 0xd2c00000 | r | x >> 27; // movz x(r),#(x >> 32),lsl #32
179 if (!(x & ~(m << 48)))
180 return 0xd2e00000 | r | x >> 43; // movz x(r),#(x >> 48),lsl #48
181 if ((x & ~m) == m << 16)
182 return (0x12800000 | r |
183 (~x << 5 & 0x1fffe0)); // movn w(r),#(~x)
184 if ((x & ~(m << 16)) == m)
185 return (0x12a00000 | r |
186 (~x >> 11 & 0x1fffe0)); // movn w(r),#(~x >> 16),lsl #16
187 if (!~(x | m))
188 return (0x92800000 | r |
189 (~x << 5 & 0x1fffe0)); // movn x(r),#(~x)
190 if (!~(x | m << 16))
191 return (0x92a00000 | r |
192 (~x >> 11 & 0x1fffe0)); // movn x(r),#(~x >> 16),lsl #16
193 if (!~(x | m << 32))
194 return (0x92c00000 | r |
195 (~x >> 27 & 0x1fffe0)); // movn x(r),#(~x >> 32),lsl #32
196 if (!~(x | m << 48))
197 return (0x92e00000 | r |
198 (~x >> 43 & 0x1fffe0)); // movn x(r),#(~x >> 48),lsl #48
199 if (!(x >> 32) && (e = arm64_encode_bimm64(x | x << 32)) >= 0)
200 return 0x320003e0 | r | (uint32_t)e << 10; // movi w(r),#(x)
201 if ((e = arm64_encode_bimm64(x)) >= 0)
202 return 0xb20003e0 | r | (uint32_t)e << 10; // movi x(r),#(x)
203 return 0;
206 static void arm64_movimm(int r, uint64_t x)
208 uint32_t i;
209 if ((i = arm64_movi(r, x)))
210 o(i); // a single MOV
211 else {
212 // MOVZ/MOVN and 1-3 MOVKs
213 int z = 0, m = 0;
214 uint32_t mov1 = 0xd2800000; // movz
215 uint64_t x1 = x;
216 for (i = 0; i < 64; i += 16) {
217 z += !(x >> i & 0xffff);
218 m += !(~x >> i & 0xffff);
220 if (m > z) {
221 x1 = ~x;
222 mov1 = 0x92800000; // movn
224 for (i = 0; i < 64; i += 16)
225 if (x1 >> i & 0xffff) {
226 o(mov1 | r | (x1 >> i & 0xffff) << 5 | i << 17);
227 // movz/movn x(r),#(*),lsl #(i)
228 break;
230 for (i += 16; i < 64; i += 16)
231 if (x1 >> i & 0xffff)
232 o(0xf2800000 | r | (x >> i & 0xffff) << 5 | i << 17);
233 // movk x(r),#(*),lsl #(i)
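/* Fallback strategy sketch: when no single instruction can materialize the
 * constant, arm64_movimm() counts the 0x0000 and 0xffff halfwords and starts
 * from MOVZ or MOVN (whichever leaves fewer halfwords to patch up), then
 * fills in the remaining halfwords with MOVK.  For example, the constant
 * 0x0000123400005678 would be emitted as:
 *     movz x(r),#0x5678
 *     movk x(r),#0x1234,lsl #32
 */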
237 // Patch all branches in list pointed to by t to branch to a:
238 ST_FUNC void gsym_addr(int t_, int a_)
240 uint32_t t = t_;
241 uint32_t a = a_;
242 while (t) {
243 unsigned char *ptr = cur_text_section->data + t;
244 uint32_t next = read32le(ptr);
245 if (a - t + 0x8000000 >= 0x10000000)
246 tcc_error("branch out of range");
247 write32le(ptr, (a - t == 4 ? 0xd503201f : // nop
248 0x14000000 | ((a - t) >> 2 & 0x3ffffff))); // b
249 t = next;
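/* Forward jumps are kept as an in-text linked list: gjmp() emits a word
 * holding the offset of the previous unresolved jump (see gjmp_append
 * further below), and gsym_addr() walks that chain, replacing each
 * placeholder with a "b" to the target, or with a "nop" when the branch
 * would only skip to the very next instruction. */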
253 static int arm64_type_size(int t)
256 * case values are in increasing order (from 1 to 11),
257 * which may help compiler optimizers. See tcc.h
259 switch (t & VT_BTYPE) {
260 case VT_BYTE: return 0;
261 case VT_SHORT: return 1;
262 case VT_INT: return 2;
263 case VT_LLONG: return 3;
264 case VT_PTR: return 3;
265 case VT_FUNC: return 3;
266 case VT_STRUCT: return 3;
267 case VT_FLOAT: return 2;
268 case VT_DOUBLE: return 3;
269 case VT_LDOUBLE: return 4;
270 case VT_BOOL: return 0;
272 assert(0);
273 return 0;
276 static void arm64_spoff(int reg, uint64_t off)
278 uint32_t sub = off >> 63;
279 if (sub)
280 off = -off;
281 if (off < 4096)
282 o(0x910003e0 | sub << 30 | reg | off << 10);
283 // (add|sub) x(reg),sp,#(off)
284 else {
285 arm64_movimm(30, off); // use x30 for offset
286 o(0x8b3e63e0 | sub << 30 | reg); // (add|sub) x(reg),sp,x30
290 /* invert 0: return the part of the offset to fold into arm64_sym */
291 /* invert 1: return the part of the offset to keep in the load/store */
292 static uint64_t arm64_check_offset(int invert, int sz_, uint64_t off)
294 uint32_t sz = sz_;
295 if (!(off & ~((uint32_t)0xfff << sz)) ||
296 (off < 256 || -off <= 256))
297 return invert ? off : 0ul;
298 else if ((off & ((uint32_t)0xfff << sz)))
299 return invert ? off & ((uint32_t)0xfff << sz)
300 : off & ~((uint32_t)0xfff << sz);
301 else if (off & 0x1ff)
302 return invert ? off & 0x1ff : off & ~0x1ff;
303 else
304 return invert ? 0ul : off;
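/* The two invert values are meant to be used as a pair on the same offset:
 * e.g. for an 8-byte access at offset 0x12340 from a symbol, invert=0 yields
 * 0x10000 (folded into the address computed by arm64_sym) and invert=1
 * yields 0x2340 (small enough for the scaled ldr/str immediate), so the two
 * parts add back up to the original offset. */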
307 static void arm64_ldrx(int sg, int sz_, int dst, int bas, uint64_t off)
309 uint32_t sz = sz_;
310 if (sz >= 2)
311 sg = 0;
312 if (!(off & ~((uint32_t)0xfff << sz)))
313 o(0x39400000 | dst | bas << 5 | off << (10 - sz) |
314 (uint32_t)!!sg << 23 | sz << 30); // ldr(*) x(dst),[x(bas),#(off)]
315 else if (off < 256 || -off <= 256)
316 o(0x38400000 | dst | bas << 5 | (off & 511) << 12 |
317 (uint32_t)!!sg << 23 | sz << 30); // ldur(*) x(dst),[x(bas),#(off)]
318 else {
319 arm64_movimm(30, off); // use x30 for offset
320 o(0x38206800 | dst | bas << 5 | (uint32_t)30 << 16 |
321 (uint32_t)(!!sg + 1) << 22 | sz << 30); // ldr(*) x(dst),[x(bas),x30]
325 static void arm64_ldrv(int sz_, int dst, int bas, uint64_t off)
327 uint32_t sz = sz_;
328 if (!(off & ~((uint32_t)0xfff << sz)))
329 o(0x3d400000 | dst | bas << 5 | off << (10 - sz) |
330 (sz & 4) << 21 | (sz & 3) << 30); // ldr (s|d|q)(dst),[x(bas),#(off)]
331 else if (off < 256 || -off <= 256)
332 o(0x3c400000 | dst | bas << 5 | (off & 511) << 12 |
333 (sz & 4) << 21 | (sz & 3) << 30); // ldur (s|d|q)(dst),[x(bas),#(off)]
334 else {
335 arm64_movimm(30, off); // use x30 for offset
336 o(0x3c606800 | dst | bas << 5 | (uint32_t)30 << 16 |
337 sz << 30 | (sz & 4) << 21); // ldr (s|d|q)(dst),[x(bas),x30]
341 static void arm64_ldrs(int reg_, int size)
343 uint32_t reg = reg_;
344 // Use x30 for intermediate value in some cases.
345 switch (size) {
346 default: assert(0); break;
347 case 0:
348 /* Can happen with zero size structs */
349 break;
350 case 1:
351 arm64_ldrx(0, 0, reg, reg, 0);
352 break;
353 case 2:
354 arm64_ldrx(0, 1, reg, reg, 0);
355 break;
356 case 3:
357 arm64_ldrx(0, 1, 30, reg, 0);
358 arm64_ldrx(0, 0, reg, reg, 2);
359 o(0x2a0043c0 | reg | reg << 16); // orr w(reg),w30,w(reg),lsl #16
360 break;
361 case 4:
362 arm64_ldrx(0, 2, reg, reg, 0);
363 break;
364 case 5:
365 arm64_ldrx(0, 2, 30, reg, 0);
366 arm64_ldrx(0, 0, reg, reg, 4);
367 o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
368 break;
369 case 6:
370 arm64_ldrx(0, 2, 30, reg, 0);
371 arm64_ldrx(0, 1, reg, reg, 4);
372 o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
373 break;
374 case 7:
375 arm64_ldrx(0, 2, 30, reg, 0);
376 arm64_ldrx(0, 2, reg, reg, 3);
377 o(0x53087c00 | reg | reg << 5); // lsr w(reg), w(reg), #8
378 o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
379 break;
380 case 8:
381 arm64_ldrx(0, 3, reg, reg, 0);
382 break;
383 case 9:
384 arm64_ldrx(0, 0, reg + 1, reg, 8);
385 arm64_ldrx(0, 3, reg, reg, 0);
386 break;
387 case 10:
388 arm64_ldrx(0, 1, reg + 1, reg, 8);
389 arm64_ldrx(0, 3, reg, reg, 0);
390 break;
391 case 11:
392 arm64_ldrx(0, 2, reg + 1, reg, 7);
393 o(0x53087c00 | (reg+1) | (reg+1) << 5); // lsr w(reg+1), w(reg+1), #8
394 arm64_ldrx(0, 3, reg, reg, 0);
395 break;
396 case 12:
397 arm64_ldrx(0, 2, reg + 1, reg, 8);
398 arm64_ldrx(0, 3, reg, reg, 0);
399 break;
400 case 13:
401 arm64_ldrx(0, 3, reg + 1, reg, 5);
402 o(0xd358fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #24
403 arm64_ldrx(0, 3, reg, reg, 0);
404 break;
405 case 14:
406 arm64_ldrx(0, 3, reg + 1, reg, 6);
407 o(0xd350fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #16
408 arm64_ldrx(0, 3, reg, reg, 0);
409 break;
410 case 15:
411 arm64_ldrx(0, 3, reg + 1, reg, 7);
412 o(0xd348fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #8
413 arm64_ldrx(0, 3, reg, reg, 0);
414 break;
415 case 16:
416 o(0xa9400000 | reg | (reg+1) << 10 | reg << 5);
417 // ldp x(reg),x(reg+1),[x(reg)]
418 break;
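/* arm64_ldrs() above loads a struct of the given size (0..16 bytes) from the
 * address held in x(reg) into x(reg) (and x(reg+1) for sizes above 8),
 * composing the odd sizes out of narrower loads so that no bytes beyond the
 * end of the object are read. */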
422 static void arm64_strx(int sz_, int dst, int bas, uint64_t off)
424 uint32_t sz = sz_;
425 if (!(off & ~((uint32_t)0xfff << sz)))
426 o(0x39000000 | dst | bas << 5 | off << (10 - sz) | sz << 30);
427 // str(*) x(dst),[x(bas),#(off)]
428 else if (off < 256 || -off <= 256)
429 o(0x38000000 | dst | bas << 5 | (off & 511) << 12 | sz << 30);
430 // stur(*) x(dst),[x(bas),#(off)]
431 else {
432 arm64_movimm(30, off); // use x30 for offset
433 o(0x38206800 | dst | bas << 5 | (uint32_t)30 << 16 | sz << 30);
434 // str(*) x(dst),[x(bas),x30]
438 static void arm64_strv(int sz_, int dst, int bas, uint64_t off)
440 uint32_t sz = sz_;
441 if (!(off & ~((uint32_t)0xfff << sz)))
442 o(0x3d000000 | dst | bas << 5 | off << (10 - sz) |
443 (sz & 4) << 21 | (sz & 3) << 30); // str (s|d|q)(dst),[x(bas),#(off)]
444 else if (off < 256 || -off <= 256)
445 o(0x3c000000 | dst | bas << 5 | (off & 511) << 12 |
446 (sz & 4) << 21 | (sz & 3) << 30); // stur (s|d|q)(dst),[x(bas),#(off)]
447 else {
448 arm64_movimm(30, off); // use x30 for offset
449 o(0x3c206800 | dst | bas << 5 | (uint32_t)30 << 16 |
450 sz << 30 | (sz & 4) << 21); // str (s|d|q)(dst),[x(bas),x30]
454 static void arm64_sym(int r, Sym *sym, unsigned long addend)
456 greloca(cur_text_section, sym, ind, R_AARCH64_ADR_GOT_PAGE, 0);
457 o(0x90000000 | r); // adrp xr, #sym
458 greloca(cur_text_section, sym, ind, R_AARCH64_LD64_GOT_LO12_NC, 0);
459 o(0xf9400000 | r | (r << 5)); // ld xr,[xr, #sym]
460 if (addend) {
461 // add xr, xr, #addend
462 if (addend & 0xffful)
463 o(0x91000000 | r | r << 5 | (addend & 0xfff) << 10);
464 if (addend > 0xffful) {
465 // add xr, xr, #addend, lsl #12
466 if (addend & 0xfff000ul)
467 o(0x91400000 | r | r << 5 | ((addend >> 12) & 0xfff) << 10);
468 if (addend > 0xfffffful) {
469 /* very unlikely */
470 int t = r ? 0 : 1;
471 o(0xf81f0fe0 | t); /* str xt, [sp, #-16]! */
472 arm64_movimm(t, addend & ~0xfffffful); // use xt for addend
473 o(0x91000000 | r | (t << 5)); /* add xr, xt, #0 */
474 o(0xf84107e0 | t); /* ldr xt, [sp], #16 */
480 static void arm64_load_cmp(int r, SValue *sv);
482 ST_FUNC void load(int r, SValue *sv)
484 int svtt = sv->type.t;
485 int svr = sv->r & ~VT_BOUNDED;
486 int svrv = svr & VT_VALMASK;
487 uint64_t svcul = (uint32_t)sv->c.i;
488 svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
490 if (svr == (VT_LOCAL | VT_LVAL)) {
491 if (IS_FREG(r))
492 arm64_ldrv(arm64_type_size(svtt), fltr(r), 29, svcul);
493 else
494 arm64_ldrx(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
495 intr(r), 29, svcul);
496 return;
499 if (svr == (VT_CONST | VT_LVAL)) {
500 arm64_sym(30, sv->sym, // use x30 for address
501 arm64_check_offset(0, arm64_type_size(svtt), sv->c.i));
502 if (IS_FREG(r))
503 arm64_ldrv(arm64_type_size(svtt), fltr(r), 30,
504 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
505 else
506 arm64_ldrx(!(svtt&VT_UNSIGNED), arm64_type_size(svtt), intr(r), 30,
507 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
508 return;
511 if ((svr & ~VT_VALMASK) == VT_LVAL && svrv < VT_CONST) {
512 if ((svtt & VT_BTYPE) != VT_VOID) {
513 if (IS_FREG(r))
514 arm64_ldrv(arm64_type_size(svtt), fltr(r), intr(svrv), 0);
515 else
516 arm64_ldrx(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
517 intr(r), intr(svrv), 0);
519 return;
522 if (svr == (VT_CONST | VT_LVAL | VT_SYM)) {
523 arm64_sym(30, sv->sym, // use x30 for address
524 arm64_check_offset(0, arm64_type_size(svtt), svcul));
525 if (IS_FREG(r))
526 arm64_ldrv(arm64_type_size(svtt), fltr(r), 30,
527 arm64_check_offset(1, arm64_type_size(svtt), svcul));
528 else
529 arm64_ldrx(!(svtt&VT_UNSIGNED), arm64_type_size(svtt), intr(r), 30,
530 arm64_check_offset(1, arm64_type_size(svtt), svcul));
531 return;
534 if (svr == (VT_CONST | VT_SYM)) {
535 arm64_sym(intr(r), sv->sym, svcul);
536 return;
539 if (svr == VT_CONST) {
540 if ((svtt & VT_BTYPE) != VT_VOID)
541 arm64_movimm(intr(r), arm64_type_size(svtt) == 3 ?
542 sv->c.i : (uint32_t)svcul);
543 return;
546 if (svr < VT_CONST) {
547 if (IS_FREG(r) && IS_FREG(svr))
548 if (svtt == VT_LDOUBLE)
549 o(0x4ea01c00 | fltr(r) | fltr(svr) << 5);
550 // mov v(r).16b,v(svr).16b
551 else
552 o(0x1e604000 | fltr(r) | fltr(svr) << 5); // fmov d(r),d(svr)
553 else if (!IS_FREG(r) && !IS_FREG(svr))
554 o(0xaa0003e0 | intr(r) | intr(svr) << 16); // mov x(r),x(svr)
555 else
556 assert(0);
557 return;
560 if (svr == VT_LOCAL) {
561 if (-svcul < 0x1000)
562 o(0xd10003a0 | intr(r) | -svcul << 10); // sub x(r),x29,#...
563 else {
564 arm64_movimm(30, -svcul); // use x30 for offset
565 o(0xcb0003a0 | intr(r) | (uint32_t)30 << 16); // sub x(r),x29,x30
567 return;
570 if (svr == VT_JMP || svr == VT_JMPI) {
571 int t = (svr == VT_JMPI);
572 arm64_movimm(intr(r), t);
573 o(0x14000002); // b .+8
574 gsym(svcul);
575 arm64_movimm(intr(r), t ^ 1);
576 return;
579 if (svr == (VT_LLOCAL | VT_LVAL)) {
580 arm64_ldrx(0, 3, 30, 29, svcul); // use x30 for offset
581 if (IS_FREG(r))
582 arm64_ldrv(arm64_type_size(svtt), fltr(r), 30, 0);
583 else
584 arm64_ldrx(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
585 intr(r), 30, 0);
586 return;
589 if (svr == VT_CMP) {
590 arm64_load_cmp(r, sv);
591 return;
594 printf("load(%x, (%x, %x, %lx))\n", r, svtt, sv->r, (long)svcul);
595 assert(0);
598 ST_FUNC void store(int r, SValue *sv)
600 int svtt = sv->type.t;
601 int svr = sv->r & ~VT_BOUNDED;
602 int svrv = svr & VT_VALMASK;
603 uint64_t svcul = (uint32_t)sv->c.i;
604 svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
606 if (svr == (VT_LOCAL | VT_LVAL)) {
607 if (IS_FREG(r))
608 arm64_strv(arm64_type_size(svtt), fltr(r), 29, svcul);
609 else
610 arm64_strx(arm64_type_size(svtt), intr(r), 29, svcul);
611 return;
614 if (svr == (VT_CONST | VT_LVAL)) {
615 arm64_sym(30, sv->sym, // use x30 for address
616 arm64_check_offset(0, arm64_type_size(svtt), sv->c.i));
617 if (IS_FREG(r))
618 arm64_strv(arm64_type_size(svtt), fltr(r), 30,
619 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
620 else
621 arm64_strx(arm64_type_size(svtt), intr(r), 30,
622 arm64_check_offset(1, arm64_type_size(svtt), sv->c.i));
623 return;
626 if ((svr & ~VT_VALMASK) == VT_LVAL && svrv < VT_CONST) {
627 if (IS_FREG(r))
628 arm64_strv(arm64_type_size(svtt), fltr(r), intr(svrv), 0);
629 else
630 arm64_strx(arm64_type_size(svtt), intr(r), intr(svrv), 0);
631 return;
634 if (svr == (VT_CONST | VT_LVAL | VT_SYM)) {
635 arm64_sym(30, sv->sym, // use x30 for address
636 arm64_check_offset(0, arm64_type_size(svtt), svcul));
637 if (IS_FREG(r))
638 arm64_strv(arm64_type_size(svtt), fltr(r), 30,
639 arm64_check_offset(1, arm64_type_size(svtt), svcul));
640 else
641 arm64_strx(arm64_type_size(svtt), intr(r), 30,
642 arm64_check_offset(1, arm64_type_size(svtt), svcul));
643 return;
646 printf("store(%x, (%x, %x, %lx))\n", r, svtt, sv->r, (long)svcul);
647 assert(0);
650 static void arm64_gen_bl_or_b(int b)
652 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST && (vtop->r & VT_SYM)) {
653 greloca(cur_text_section, vtop->sym, ind,
654 b ? R_AARCH64_JUMP26 : R_AARCH64_CALL26, 0);
655 o(0x14000000 | (uint32_t)!b << 31); // b/bl .
657 else {
658 #ifdef CONFIG_TCC_BCHECK
659 vtop->r &= ~VT_MUSTBOUND;
660 #endif
661 o(0xd61f0000 | (uint32_t)!b << 21 | intr(gv(RC_R30)) << 5); // br/blr
665 #if defined(CONFIG_TCC_BCHECK)
667 static void gen_bounds_call(int v)
669 Sym *sym = external_helper_sym(v);
671 greloca(cur_text_section, sym, ind, R_AARCH64_CALL26, 0);
672 o(0x94000000); // bl
675 static void gen_bounds_prolog(void)
677 /* leave some room for bound checking code */
678 func_bound_offset = lbounds_section->data_offset;
679 func_bound_ind = ind;
680 func_bound_add_epilog = 0;
681 o(0xd503201f); /* nop -> mov x0, lbound section pointer */
682 o(0xd503201f);
683 o(0xd503201f);
684 o(0xd503201f); /* nop -> call __bound_local_new */
687 static void gen_bounds_epilog(void)
689 addr_t saved_ind;
690 addr_t *bounds_ptr;
691 Sym *sym_data;
692 int offset_modified = func_bound_offset != lbounds_section->data_offset;
694 if (!offset_modified && !func_bound_add_epilog)
695 return;
697 /* add end of table info */
698 bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
699 *bounds_ptr = 0;
701 sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
702 func_bound_offset, lbounds_section->data_offset);
704 /* generate bound local allocation */
705 if (offset_modified) {
706 saved_ind = ind;
707 ind = func_bound_ind;
708 greloca(cur_text_section, sym_data, ind, R_AARCH64_ADR_GOT_PAGE, 0);
709 o(0x90000000 | 0); // adrp x0, #sym_data
710 greloca(cur_text_section, sym_data, ind, R_AARCH64_LD64_GOT_LO12_NC, 0);
711 o(0xf9400000 | 0 | (0 << 5)); // ld x0,[x0, #sym_data]
712 gen_bounds_call(TOK___bound_local_new);
713 ind = saved_ind;
716 /* generate bound check local freeing */
717 o(0xf81f0fe0); /* str x0, [sp, #-16]! */
718 o(0x3c9f0fe0); /* str q0, [sp, #-16]! */
719 greloca(cur_text_section, sym_data, ind, R_AARCH64_ADR_GOT_PAGE, 0);
720 o(0x90000000 | 0); // adrp x0, #sym_data
721 greloca(cur_text_section, sym_data, ind, R_AARCH64_LD64_GOT_LO12_NC, 0);
722 o(0xf9400000 | 0 | (0 << 5)); // ld x0,[x0, #sym_data]
723 gen_bounds_call(TOK___bound_local_delete);
724 o(0x3cc107e0); /* ldr q0, [sp], #16 */
725 o(0xf84107e0); /* ldr x0, [sp], #16 */
727 #endif
729 static int arm64_hfa_aux(CType *type, int *fsize, int num)
731 if (is_float(type->t)) {
732 int a, n = type_size(type, &a);
733 if (num >= 4 || (*fsize && *fsize != n))
734 return -1;
735 *fsize = n;
736 return num + 1;
738 else if ((type->t & VT_BTYPE) == VT_STRUCT) {
739 int is_struct = 0; // rather than union
740 Sym *field;
741 for (field = type->ref->next; field; field = field->next)
742 if (field->c) {
743 is_struct = 1;
744 break;
746 if (is_struct) {
747 int num0 = num;
748 for (field = type->ref->next; field; field = field->next) {
749 if (field->c != (num - num0) * *fsize)
750 return -1;
751 num = arm64_hfa_aux(&field->type, fsize, num);
752 if (num == -1)
753 return -1;
755 if (type->ref->c != (num - num0) * *fsize)
756 return -1;
757 return num;
759 else { // union
760 int num0 = num;
761 for (field = type->ref->next; field; field = field->next) {
762 int num1 = arm64_hfa_aux(&field->type, fsize, num0);
763 if (num1 == -1)
764 return -1;
765 num = num1 < num ? num : num1;
767 if (type->ref->c != (num - num0) * *fsize)
768 return -1;
769 return num;
772 else if ((type->t & VT_ARRAY) && ((type->t & VT_BTYPE) != VT_PTR)) {
773 int num1;
774 if (!type->ref->c)
775 return num;
776 num1 = arm64_hfa_aux(&type->ref->type, fsize, num);
777 if (num1 == -1 || (num1 != num && type->ref->c > 4))
778 return -1;
779 num1 = num + type->ref->c * (num1 - num);
780 if (num1 > 4)
781 return -1;
782 return num1;
784 return -1;
787 static int arm64_hfa(CType *type, unsigned *fsize)
789 if ((type->t & VT_BTYPE) == VT_STRUCT ||
790 ((type->t & VT_ARRAY) && ((type->t & VT_BTYPE) != VT_PTR))) {
791 int sz = 0;
792 int n = arm64_hfa_aux(type, &sz, 0);
793 if (0 < n && n <= 4) {
794 if (fsize)
795 *fsize = sz;
796 return n;
799 return 0;
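/* An HFA (homogeneous floating-point aggregate) in the AAPCS64 sense is a
 * struct or array whose members are all of one and the same floating-point
 * type, with at most four members; such values travel in consecutive SIMD
 * registers.  Illustrative example: "struct { float x, y, z; }" gives n = 3
 * and *fsize = 4, so as a return value it occupies s0-s2. */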
802 static unsigned long arm64_pcs_aux(int n, CType **type, unsigned long *a)
804 int nx = 0; // next integer register
805 int nv = 0; // next vector register
806 unsigned long ns = 32; // next stack offset
807 int i;
809 for (i = 0; i < n; i++) {
810 int hfa = arm64_hfa(type[i], 0);
811 int size, align;
813 if ((type[i]->t & VT_ARRAY) ||
814 (type[i]->t & VT_BTYPE) == VT_FUNC)
815 size = align = 8;
816 else
817 size = type_size(type[i], &align);
819 if (hfa)
820 // B.2
822 else if (size > 16) {
823 // B.3: replace with pointer
824 if (nx < 8)
825 a[i] = nx++ << 1 | 1;
826 else {
827 ns = (ns + 7) & ~7;
828 a[i] = ns | 1;
829 ns += 8;
831 continue;
833 else if ((type[i]->t & VT_BTYPE) == VT_STRUCT)
834 // B.4
835 size = (size + 7) & ~7;
837 // C.1
838 if (is_float(type[i]->t) && nv < 8) {
839 a[i] = 16 + (nv++ << 1);
840 continue;
843 // C.2
844 if (hfa && nv + hfa <= 8) {
845 a[i] = 16 + (nv << 1);
846 nv += hfa;
847 continue;
850 // C.3
851 if (hfa) {
852 nv = 8;
853 size = (size + 7) & ~7;
856 // C.4
857 if (hfa || (type[i]->t & VT_BTYPE) == VT_LDOUBLE) {
858 ns = (ns + 7) & ~7;
859 ns = (ns + align - 1) & -align;
862 // C.5
863 if ((type[i]->t & VT_BTYPE) == VT_FLOAT)
864 size = 8;
866 // C.6
867 if (hfa || is_float(type[i]->t)) {
868 a[i] = ns;
869 ns += size;
870 continue;
873 // C.7
874 if ((type[i]->t & VT_BTYPE) != VT_STRUCT && size <= 8 && nx < 8) {
875 a[i] = nx++ << 1;
876 continue;
879 // C.8
880 if (align == 16)
881 nx = (nx + 1) & ~1;
883 // C.9
884 if ((type[i]->t & VT_BTYPE) != VT_STRUCT && size == 16 && nx < 7) {
885 a[i] = nx << 1;
886 nx += 2;
887 continue;
890 // C.10
891 if ((type[i]->t & VT_BTYPE) == VT_STRUCT && size <= (8 - nx) * 8) {
892 a[i] = nx << 1;
893 nx += (size + 7) >> 3;
894 continue;
897 // C.11
898 nx = 8;
900 // C.12
901 ns = (ns + 7) & ~7;
902 ns = (ns + align - 1) & -align;
904 // C.13
905 if ((type[i]->t & VT_BTYPE) == VT_STRUCT) {
906 a[i] = ns;
907 ns += size;
908 continue;
911 // C.14
912 if (size < 8)
913 size = 8;
915 // C.15
916 a[i] = ns;
917 ns += size;
920 return ns - 32;
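/* arm64_pcs_aux() encodes the location of each argument in a[i]:
 *   a[i] < 16  : general register x(a[i] / 2); bit 0 set means the argument
 *                was replaced by a pointer to a caller-made copy (rule B.3)
 *   16..31     : SIMD register v(a[i] / 2 - 8)
 *   a[i] >= 32 : stack, at offset (a[i] - 32) into the argument area
 *                (bit 0 again marking a pointer to a copy)
 * The return value is the number of stack bytes the arguments occupy.  The
 * B.* / C.* comments above refer to the stage names of the AAPCS64
 * parameter-passing algorithm. */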
923 static unsigned long arm64_pcs(int n, CType **type, unsigned long *a)
925 unsigned long stack;
927 // Return type:
928 if ((type[0]->t & VT_BTYPE) == VT_VOID)
929 a[0] = -1;
930 else {
931 arm64_pcs_aux(1, type, a);
932 assert(a[0] == 0 || a[0] == 1 || a[0] == 16);
935 // Argument types:
936 stack = arm64_pcs_aux(n, type + 1, a + 1);
938 if (0) {
939 int i;
940 for (i = 0; i <= n; i++) {
941 if (!i)
942 printf("arm64_pcs return: ");
943 else
944 printf("arm64_pcs arg %d: ", i);
945 if (a[i] == (unsigned long)-1)
946 printf("void\n");
947 else if (a[i] == 1 && !i)
948 printf("X8 pointer\n");
949 else if (a[i] < 16)
950 printf("X%lu%s\n", a[i] / 2, a[i] & 1 ? " pointer" : "");
951 else if (a[i] < 32)
952 printf("V%lu\n", a[i] / 2 - 8);
953 else
954 printf("stack %lu%s\n",
955 (a[i] - 32) & ~1, a[i] & 1 ? " pointer" : "");
959 return stack;
962 ST_FUNC void gfunc_call(int nb_args)
964 CType *return_type;
965 CType **t;
966 unsigned long *a, *a1;
967 unsigned long stack;
968 int i;
970 #ifdef CONFIG_TCC_BCHECK
971 if (tcc_state->do_bounds_check)
972 gbound_args(nb_args);
973 #endif
975 return_type = &vtop[-nb_args].type.ref->type;
976 if ((return_type->t & VT_BTYPE) == VT_STRUCT)
977 --nb_args;
979 t = tcc_malloc((nb_args + 1) * sizeof(*t));
980 a = tcc_malloc((nb_args + 1) * sizeof(*a));
981 a1 = tcc_malloc((nb_args + 1) * sizeof(*a1));
983 t[0] = return_type;
984 for (i = 0; i < nb_args; i++)
985 t[nb_args - i] = &vtop[-i].type;
987 stack = arm64_pcs(nb_args, t, a);
989 // Allocate space for structs replaced by pointer:
990 for (i = nb_args; i; i--)
991 if (a[i] & 1) {
992 SValue *arg = &vtop[i - nb_args];
993 int align, size = type_size(&arg->type, &align);
994 assert((arg->type.t & VT_BTYPE) == VT_STRUCT);
995 stack = (stack + align - 1) & -align;
996 a1[i] = stack;
997 stack += size;
1000 stack = (stack + 15) >> 4 << 4;
1002 if (stack >= 0x1000000) // 16 MB
1003 tcc_error("stack size too big %lu", stack);
1004 if (stack & 0xfff)
1005 o(0xd10003ff | (stack & 0xfff) << 10); // sub sp,sp,#(n)
1006 if (stack >> 12)
1007 o(0xd14003ff | (stack >> 12) << 10);
1009 // First pass: set all values on stack
1010 for (i = nb_args; i; i--) {
1011 vpushv(vtop - nb_args + i);
1013 if (a[i] & 1) {
1014 // struct replaced by pointer
1015 int r = get_reg(RC_INT);
1016 arm64_spoff(intr(r), a1[i]);
1017 vset(&vtop->type, r | VT_LVAL, 0);
1018 vswap();
1019 vstore();
1020 if (a[i] >= 32) {
1021 // pointer on stack
1022 r = get_reg(RC_INT);
1023 arm64_spoff(intr(r), a1[i]);
1024 arm64_strx(3, intr(r), 31, (a[i] - 32) >> 1 << 1);
1027 else if (a[i] >= 32) {
1028 // value on stack
1029 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
1030 int r = get_reg(RC_INT);
1031 arm64_spoff(intr(r), a[i] - 32);
1032 vset(&vtop->type, r | VT_LVAL, 0);
1033 vswap();
1034 vstore();
1036 else if (is_float(vtop->type.t)) {
1037 gv(RC_FLOAT);
1038 arm64_strv(arm64_type_size(vtop[0].type.t),
1039 fltr(vtop[0].r), 31, a[i] - 32);
1041 else {
1042 gv(RC_INT);
1043 arm64_strx(arm64_type_size(vtop[0].type.t),
1044 intr(vtop[0].r), 31, a[i] - 32);
1048 --vtop;
1051 // Second pass: assign values to registers
1052 for (i = nb_args; i; i--, vtop--) {
1053 if (a[i] < 16 && !(a[i] & 1)) {
1054 // value in general-purpose registers
1055 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
1056 int align, size = type_size(&vtop->type, &align);
1057 if (size) {
1058 vtop->type.t = VT_PTR;
1059 gaddrof();
1060 gv(RC_R(a[i] / 2));
1061 arm64_ldrs(a[i] / 2, size);
1064 else
1065 gv(RC_R(a[i] / 2));
1067 else if (a[i] < 16)
1068 // struct replaced by pointer in register
1069 arm64_spoff(a[i] / 2, a1[i]);
1070 else if (a[i] < 32) {
1071 // value in floating-point registers
1072 if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
1073 uint32_t j, sz, n = arm64_hfa(&vtop->type, &sz);
1074 vtop->type.t = VT_PTR;
1075 gaddrof();
1076 gv(RC_R30);
1077 for (j = 0; j < n; j++)
1078 o(0x3d4003c0 |
1079 (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
1080 (a[i] / 2 - 8 + j) |
1081 j << 10); // ldr ([sdq])(*),[x30,#(j * sz)]
1083 else
1084 gv(RC_F(a[i] / 2 - 8));
1088 if ((return_type->t & VT_BTYPE) == VT_STRUCT) {
1089 if (a[0] == 1) {
1090 // indirect return: set x8 and discard the stack value
1091 gv(RC_R(8));
1092 --vtop;
1094 else
1095 // return in registers: keep the address for after the call
1096 vswap();
1099 save_regs(0);
1100 arm64_gen_bl_or_b(0);
1101 --vtop;
1102 if (stack & 0xfff)
1103 o(0x910003ff | (stack & 0xfff) << 10); // add sp,sp,#(n)
1104 if (stack >> 12)
1105 o(0x914003ff | (stack >> 12) << 10);
1108 int rt = return_type->t;
1109 int bt = rt & VT_BTYPE;
1110 if (bt == VT_STRUCT && !(a[0] & 1)) {
1111 // A struct was returned in registers, so write it out:
1112 gv(RC_R(8));
1113 --vtop;
1114 if (a[0] == 0) {
1115 int align, size = type_size(return_type, &align);
1116 assert(size <= 16);
1117 if (size > 8)
1118 o(0xa9000500); // stp x0,x1,[x8]
1119 else if (size)
1120 arm64_strx(size > 4 ? 3 : size > 2 ? 2 : size > 1, 0, 8, 0);
1123 else if (a[0] == 16) {
1124 uint32_t j, sz, n = arm64_hfa(return_type, &sz);
1125 for (j = 0; j < n; j++)
1126 o(0x3d000100 |
1127 (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
1128 (a[i] / 2 - 8 + j) |
1129 j << 10); // str ([sdq])(*),[x8,#(j * sz)]
1134 tcc_free(a1);
1135 tcc_free(a);
1136 tcc_free(t);
1139 static unsigned long arm64_func_va_list_stack;
1140 static int arm64_func_va_list_gr_offs;
1141 static int arm64_func_va_list_vr_offs;
1142 static int arm64_func_sub_sp_offset;
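/* gfunc_prolog() below saves the frame pointer, the link register and every
 * potential argument register in a fixed 224-byte area, so that va_arg and
 * VT_LLOCAL references can find them at constant offsets from x29:
 *   [x29, #0]    x29, x30
 *   [x29, #16]   q0-q7  (16 bytes each, up to offset 143)
 *   [x29, #144]  x8     (indirect-result pointer; func_vc points here)
 *   [x29, #160]  x0-x7
 * Incoming stack arguments start at [x29, #224]. */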
1144 ST_FUNC void gfunc_prolog(Sym *func_sym)
1146 CType *func_type = &func_sym->type;
1147 int n = 0;
1148 int i = 0;
1149 Sym *sym;
1150 CType **t;
1151 unsigned long *a;
1153 func_vc = 144; // offset of where x8 is stored
1155 for (sym = func_type->ref; sym; sym = sym->next)
1156 ++n;
1157 t = n ? tcc_malloc(n * sizeof(*t)) : NULL;
1158 a = n ? tcc_malloc(n * sizeof(*a)) : NULL;
1160 for (sym = func_type->ref; sym; sym = sym->next)
1161 t[i++] = &sym->type;
1163 arm64_func_va_list_stack = arm64_pcs(n - 1, t, a);
1165 o(0xa9b27bfd); // stp x29,x30,[sp,#-224]!
1166 o(0xad0087e0); // stp q0,q1,[sp,#16]
1167 o(0xad018fe2); // stp q2,q3,[sp,#48]
1168 o(0xad0297e4); // stp q4,q5,[sp,#80]
1169 o(0xad039fe6); // stp q6,q7,[sp,#112]
1170 o(0xa90923e8); // stp x8,x8,[sp,#144]
1171 o(0xa90a07e0); // stp x0,x1,[sp,#160]
1172 o(0xa90b0fe2); // stp x2,x3,[sp,#176]
1173 o(0xa90c17e4); // stp x4,x5,[sp,#192]
1174 o(0xa90d1fe6); // stp x6,x7,[sp,#208]
1176 arm64_func_va_list_gr_offs = -64;
1177 arm64_func_va_list_vr_offs = -128;
1179 for (i = 1, sym = func_type->ref->next; sym; i++, sym = sym->next) {
1180 int off = (a[i] < 16 ? 160 + a[i] / 2 * 8 :
1181 a[i] < 32 ? 16 + (a[i] - 16) / 2 * 16 :
1182 224 + ((a[i] - 32) >> 1 << 1));
1183 sym_push(sym->v & ~SYM_FIELD, &sym->type,
1184 (a[i] & 1 ? VT_LLOCAL : VT_LOCAL) | VT_LVAL,
1185 off);
1187 if (a[i] < 16) {
1188 int align, size = type_size(&sym->type, &align);
1189 arm64_func_va_list_gr_offs = (a[i] / 2 - 7 +
1190 (!(a[i] & 1) && size > 8)) * 8;
1192 else if (a[i] < 32) {
1193 uint32_t hfa = arm64_hfa(&sym->type, 0);
1194 arm64_func_va_list_vr_offs = (a[i] / 2 - 16 +
1195 (hfa ? hfa : 1)) * 16;
1198 // HFAs of float and double need to be written differently:
1199 if (16 <= a[i] && a[i] < 32 && (sym->type.t & VT_BTYPE) == VT_STRUCT) {
1200 uint32_t j, sz, k = arm64_hfa(&sym->type, &sz);
1201 if (sz < 16)
1202 for (j = 0; j < k; j++) {
1203 o(0x3d0003e0 | -(sz & 8) << 27 | (sz & 4) << 29 |
1204 ((a[i] - 16) / 2 + j) | (off / sz + j) << 10);
1205 // str ([sdq])(*),[sp,#(off + j * sz)]
1210 tcc_free(a);
1211 tcc_free(t);
1213 o(0x910003fd); // mov x29,sp
1214 arm64_func_sub_sp_offset = ind;
1215 // In gfunc_epilog these will be replaced with code to decrement SP:
1216 o(0xd503201f); // nop
1217 o(0xd503201f); // nop
1218 loc = 0;
1219 #ifdef CONFIG_TCC_BCHECK
1220 if (tcc_state->do_bounds_check)
1221 gen_bounds_prolog();
1222 #endif
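/* gen_va_start() and gen_va_arg() below assume the standard AAPCS64 va_list
 * layout; the member offsets match the str/ldr immediates used in both
 * functions:
 *   struct va_list {
 *       void *__stack;    // offset 0:  next stacked argument
 *       void *__gr_top;   // offset 8:  end of the saved x0-x7 area
 *       void *__vr_top;   // offset 16: end of the saved q0-q7 area
 *       int   __gr_offs;  // offset 24: negative offset from __gr_top
 *       int   __vr_offs;  // offset 28: negative offset from __vr_top
 *   };
 */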
1225 ST_FUNC void gen_va_start(void)
1227 int r;
1228 --vtop; // we don't need the "arg"
1229 gaddrof();
1230 r = intr(gv(RC_INT));
1232 if (arm64_func_va_list_stack) {
1233 //xx could use add (immediate) here
1234 arm64_movimm(30, arm64_func_va_list_stack + 224);
1235 o(0x8b1e03be); // add x30,x29,x30
1237 else
1238 o(0x910383be); // add x30,x29,#224
1239 o(0xf900001e | r << 5); // str x30,[x(r)]
1241 if (arm64_func_va_list_gr_offs) {
1242 if (arm64_func_va_list_stack)
1243 o(0x910383be); // add x30,x29,#224
1244 o(0xf900041e | r << 5); // str x30,[x(r),#8]
1247 if (arm64_func_va_list_vr_offs) {
1248 o(0x910243be); // add x30,x29,#144
1249 o(0xf900081e | r << 5); // str x30,[x(r),#16]
1252 arm64_movimm(30, arm64_func_va_list_gr_offs);
1253 o(0xb900181e | r << 5); // str w30,[x(r),#24]
1255 arm64_movimm(30, arm64_func_va_list_vr_offs);
1256 o(0xb9001c1e | r << 5); // str w30,[x(r),#28]
1258 --vtop;
1261 ST_FUNC void gen_va_arg(CType *t)
1263 int align, size = type_size(t, &align);
1264 unsigned fsize, hfa = arm64_hfa(t, &fsize);
1265 uint32_t r0, r1;
1267 if (is_float(t->t)) {
1268 hfa = 1;
1269 fsize = size;
1272 gaddrof();
1273 r0 = intr(gv(RC_INT));
1274 r1 = get_reg(RC_INT);
1275 vtop[0].r = r1 | VT_LVAL;
1276 r1 = intr(r1);
1278 if (!hfa) {
1279 uint32_t n = size > 16 ? 8 : (size + 7) & -8;
1280 o(0xb940181e | r0 << 5); // ldr w30,[x(r0),#24] // __gr_offs
1281 if (align == 16) {
1282 assert(0); // this path untested but needed for __uint128_t
1283 o(0x11003fde); // add w30,w30,#15
1284 o(0x121c6fde); // and w30,w30,#-16
1286 o(0x310003c0 | r1 | n << 10); // adds w(r1),w30,#(n)
1287 o(0x540000ad); // b.le .+20
1288 o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
1289 o(0x9100001e | r1 << 5 | n << 10); // add x30,x(r1),#(n)
1290 o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
1291 o(0x14000004); // b .+16
1292 o(0xb9001800 | r1 | r0 << 5); // str w(r1),[x(r0),#24] // __gr_offs
1293 o(0xf9400400 | r1 | r0 << 5); // ldr x(r1),[x(r0),#8] // __gr_top
1294 o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
1295 if (size > 16)
1296 o(0xf9400000 | r1 | r1 << 5); // ldr x(r1),[x(r1)]
1298 else {
1299 uint32_t rsz = hfa << 4;
1300 uint32_t ssz = (size + 7) & -(uint32_t)8;
1301 uint32_t b1, b2;
1302 o(0xb9401c1e | r0 << 5); // ldr w30,[x(r0),#28] // __vr_offs
1303 o(0x310003c0 | r1 | rsz << 10); // adds w(r1),w30,#(rsz)
1304 b1 = ind; o(0x5400000d); // b.le lab1
1305 o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
1306 if (fsize == 16) {
1307 o(0x91003c00 | r1 | r1 << 5); // add x(r1),x(r1),#15
1308 o(0x927cec00 | r1 | r1 << 5); // and x(r1),x(r1),#-16
1310 o(0x9100001e | r1 << 5 | ssz << 10); // add x30,x(r1),#(ssz)
1311 o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
1312 b2 = ind; o(0x14000000); // b lab2
1313 // lab1:
1314 write32le(cur_text_section->data + b1, 0x5400000d | (ind - b1) << 3);
1315 o(0xb9001c00 | r1 | r0 << 5); // str w(r1),[x(r0),#28] // __vr_offs
1316 o(0xf9400800 | r1 | r0 << 5); // ldr x(r1),[x(r0),#16] // __vr_top
1317 if (hfa == 1 || fsize == 16)
1318 o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
1319 else {
1320 // We need to change the layout of this HFA.
1321 // Get some space on the stack using global variable "loc":
1322 loc = (loc - size) & -(uint32_t)align;
1323 o(0x8b3ec000 | 30 | r1 << 5); // add x30,x(r1),w30,sxtw
1324 arm64_movimm(r1, loc);
1325 o(0x8b0003a0 | r1 | r1 << 16); // add x(r1),x29,x(r1)
1326 o(0x4c402bdc | (uint32_t)fsize << 7 |
1327 (uint32_t)(hfa == 2) << 15 |
1328 (uint32_t)(hfa == 3) << 14); // ld1 {v28.(4s|2d),...},[x30]
1329 o(0x0d00801c | r1 << 5 | (fsize == 8) << 10 |
1330 (uint32_t)(hfa != 2) << 13 |
1331 (uint32_t)(hfa != 3) << 21); // st(hfa) {v28.(s|d),...}[0],[x(r1)]
1333 // lab2:
1334 write32le(cur_text_section->data + b2, 0x14000000 | (ind - b2) >> 2);
1338 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret,
1339 int *align, int *regsize)
1341 return 0;
1344 ST_FUNC void gfunc_return(CType *func_type)
1346 CType *t = func_type;
1347 unsigned long a;
1349 arm64_pcs(0, &t, &a);
1350 switch (a) {
1351 case -1:
1352 break;
1353 case 0:
1354 if ((func_type->t & VT_BTYPE) == VT_STRUCT) {
1355 int align, size = type_size(func_type, &align);
1356 gaddrof();
1357 gv(RC_R(0));
1358 arm64_ldrs(0, size);
1360 else
1361 gv(RC_IRET);
1362 break;
1363 case 1: {
1364 CType type = *func_type;
1365 mk_pointer(&type);
1366 vset(&type, VT_LOCAL | VT_LVAL, func_vc);
1367 indir();
1368 vswap();
1369 vstore();
1370 break;
1372 case 16:
1373 if ((func_type->t & VT_BTYPE) == VT_STRUCT) {
1374 uint32_t j, sz, n = arm64_hfa(&vtop->type, &sz);
1375 gaddrof();
1376 gv(RC_R(0));
1377 for (j = 0; j < n; j++)
1378 o(0x3d400000 |
1379 (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
1380 j | j << 10); // ldr ([sdq])(*),[x0,#(j * sz)]
1382 else
1383 gv(RC_FRET);
1384 break;
1385 default:
1386 assert(0);
1388 vtop--;
1391 ST_FUNC void gfunc_epilog(void)
1393 #ifdef CONFIG_TCC_BCHECK
1394 if (tcc_state->do_bounds_check)
1395 gen_bounds_epilog();
1396 #endif
1398 if (loc) {
1399 // Insert instructions to subtract size of stack frame from SP.
1400 unsigned char *ptr = cur_text_section->data + arm64_func_sub_sp_offset;
1401 uint64_t diff = (-loc + 15) & ~15;
1402 if (!(diff >> 24)) {
1403 if (diff & 0xfff) // sub sp,sp,#(diff & 0xfff)
1404 write32le(ptr, 0xd10003ff | (diff & 0xfff) << 10);
1405 if (diff >> 12) // sub sp,sp,#(diff >> 12),lsl #12
1406 write32le(ptr + 4, 0xd14003ff | (diff >> 12) << 10);
1408 else {
1409 // In this case we may subtract more than necessary,
1410 // but always less than 17/16 of what we were aiming for.
1411 int i = 0;
1412 int j = 0;
1413 while (diff >> 20) {
1414 diff = (diff + 0xffff) >> 16;
1415 ++i;
1417 while (diff >> 16) {
1418 diff = (diff + 1) >> 1;
1419 ++j;
1421 write32le(ptr, 0xd2800010 | diff << 5 | i << 21);
1422 // mov x16,#(diff),lsl #(16 * i)
1423 write32le(ptr + 4, 0xcb3063ff | j << 10);
1424 // sub sp,sp,x16,lsl #(j)
1427 o(0x910003bf); // mov sp,x29
1428 o(0xa8ce7bfd); // ldp x29,x30,[sp],#224
1430 o(0xd65f03c0); // ret
1433 ST_FUNC void gen_fill_nops(int bytes)
1435 if ((bytes & 3))
1436 tcc_error("alignment of code section not multiple of 4");
1437 while (bytes > 0) {
1438 o(0xd503201f); // nop
1439 bytes -= 4;
1443 // Generate forward branch to label:
1444 ST_FUNC int gjmp(int t)
1446 int r = ind;
1447 if (nocode_wanted)
1448 return t;
1449 o(t);
1450 return r;
1453 // Generate branch to known address:
1454 ST_FUNC void gjmp_addr(int a)
1456 assert(a - ind + 0x8000000 < 0x10000000);
1457 o(0x14000000 | ((a - ind) >> 2 & 0x3ffffff));
1460 ST_FUNC int gjmp_append(int n, int t)
1462 void *p;
1463 /* insert vtop->c jump list in t */
1464 if (n) {
1465 uint32_t n1 = n, n2;
1466 while ((n2 = read32le(p = cur_text_section->data + n1)))
1467 n1 = n2;
1468 write32le(p, t);
1469 t = n;
1471 return t;
1474 void arm64_vset_VT_CMP(int op)
1476 if (op >= TOK_ULT && op <= TOK_GT) {
1477 vtop->cmp_r = vtop->r;
1478 vset_VT_CMP(0x80);
1482 static void arm64_gen_opil(int op, uint32_t l);
1484 static void arm64_load_cmp(int r, SValue *sv)
1486 sv->r = sv->cmp_r;
1487 if (sv->c.i & 1) {
1488 vpushi(1);
1489 arm64_gen_opil('^', 0);
1491 if (r != sv->r) {
1492 load(r, sv);
1493 sv->r = r;
1497 ST_FUNC int gjmp_cond(int op, int t)
1499 int bt = vtop->type.t & VT_BTYPE;
1501 int inv = op & 1;
1502 vtop->r = vtop->cmp_r;
1504 if (bt == VT_LDOUBLE) {
1505 uint32_t a, b, f = fltr(gv(RC_FLOAT));
1506 a = get_reg(RC_INT);
1507 vpushi(0);
1508 vtop[0].r = a;
1509 b = get_reg(RC_INT);
1510 a = intr(a);
1511 b = intr(b);
1512 o(0x4e083c00 | a | f << 5); // mov x(a),v(f).d[0]
1513 o(0x4e183c00 | b | f << 5); // mov x(b),v(f).d[1]
1514 o(0xaa000400 | a | a << 5 | b << 16); // orr x(a),x(a),x(b),lsl #1
1515 o(0xb4000040 | a | !!inv << 24); // cbz/cbnz x(a),.+8
1516 --vtop;
1518 else if (bt == VT_FLOAT || bt == VT_DOUBLE) {
1519 uint32_t a = fltr(gv(RC_FLOAT));
1520 o(0x1e202008 | a << 5 | (bt != VT_FLOAT) << 22); // fcmp
1521 o(0x54000040 | !!inv); // b.eq/b.ne .+8
1523 else {
1524 uint32_t ll = (bt == VT_PTR || bt == VT_LLONG);
1525 uint32_t a = intr(gv(RC_INT));
1526 o(0x34000040 | a | !!inv << 24 | ll << 31); // cbz/cbnz wA,.+8
1528 return gjmp(t);
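/* Note the pattern used above: each comparison emits a one-instruction
 * conditional skip (cbz/cbnz or b.eq/b.ne over the next instruction) and
 * then falls through to gjmp(), which emits the unconditional branch that
 * joins the forward jump list. */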
1531 static int arm64_iconst(uint64_t *val, SValue *sv)
1533 if ((sv->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
1534 return 0;
1535 if (val) {
1536 int t = sv->type.t;
1537 int bt = t & VT_BTYPE;
1538 *val = ((bt == VT_LLONG || bt == VT_PTR) ? sv->c.i :
1539 (uint32_t)sv->c.i |
1540 (t & VT_UNSIGNED ? 0 : -(sv->c.i & 0x80000000)));
1542 return 1;
1545 static int arm64_gen_opic(int op, uint32_t l, int rev, uint64_t val,
1546 uint32_t x, uint32_t a)
1548 if (op == '-' && !rev) {
1549 val = -val;
1550 op = '+';
1552 val = l ? val : (uint32_t)val;
1554 switch (op) {
1556 case '+': {
1557 uint32_t s = l ? val >> 63 : val >> 31;
1558 val = s ? -val : val;
1559 val = l ? val : (uint32_t)val;
1560 if (!(val & ~(uint64_t)0xfff))
1561 o(0x11000000 | l << 31 | s << 30 | x | a << 5 | val << 10);
1562 else if (!(val & ~(uint64_t)0xfff000))
1563 o(0x11400000 | l << 31 | s << 30 | x | a << 5 | val >> 12 << 10);
1564 else {
1565 arm64_movimm(30, val); // use x30
1566 o(0x0b1e0000 | l << 31 | s << 30 | x | a << 5);
1568 return 1;
1571 case '-':
1572 if (!val)
1573 o(0x4b0003e0 | l << 31 | x | a << 16); // neg
1574 else if (val == (l ? (uint64_t)-1 : (uint32_t)-1))
1575 o(0x2a2003e0 | l << 31 | x | a << 16); // mvn
1576 else {
1577 arm64_movimm(30, val); // use x30
1578 o(0x4b0003c0 | l << 31 | x | a << 16); // sub
1580 return 1;
1582 case '^':
1583 if (val == -1 || (val == 0xffffffff && !l)) {
1584 o(0x2a2003e0 | l << 31 | x | a << 16); // mvn
1585 return 1;
1587 // fall through
1588 case '&':
1589 case '|': {
1590 int e = arm64_encode_bimm64(l ? val : val | val << 32);
1591 if (e < 0)
1592 return 0;
1593 o((op == '&' ? 0x12000000 :
1594 op == '|' ? 0x32000000 : 0x52000000) |
1595 l << 31 | x | a << 5 | (uint32_t)e << 10);
1596 return 1;
1599 case TOK_SAR:
1600 case TOK_SHL:
1601 case TOK_SHR: {
1602 uint32_t n = 32 << l;
1603 val = val & (n - 1);
1604 if (rev)
1605 return 0;
1606 if (!val) {
1607 // tcc_warning("shift count >= width of type");
1608 o(0x2a0003e0 | l << 31 | a << 16);
1609 return 1;
1611 else if (op == TOK_SHL)
1612 o(0x53000000 | l << 31 | l << 22 | x | a << 5 |
1613 (n - val) << 16 | (n - 1 - val) << 10); // lsl
1614 else
1615 o(0x13000000 | (op == TOK_SHR) << 30 | l << 31 | l << 22 |
1616 x | a << 5 | val << 16 | (n - 1) << 10); // lsr/asr
1617 return 1;
1621 return 0;
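/* arm64_gen_opic() above tries to fold a constant operand directly into an
 * immediate form of the operation (add/sub with a 12-bit, optionally
 * shifted, immediate; logical ops with a bitmask immediate; shifts by a
 * constant), returning 1 when it emitted code and 0 to make the caller fall
 * back to the register-register path.  For example, adding the constant
 * 0x123000 would be emitted as a single "add ...,#0x123,lsl #12". */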
1624 static void arm64_gen_opil(int op, uint32_t l)
1626 uint32_t x, a, b;
1628 // Special treatment for operations with a constant operand:
1630 uint64_t val;
1631 int rev = 1;
1633 if (arm64_iconst(0, &vtop[0])) {
1634 vswap();
1635 rev = 0;
1637 if (arm64_iconst(&val, &vtop[-1])) {
1638 gv(RC_INT);
1639 a = intr(vtop[0].r);
1640 --vtop;
1641 x = get_reg(RC_INT);
1642 ++vtop;
1643 if (arm64_gen_opic(op, l, rev, val, intr(x), a)) {
1644 vtop[0].r = x;
1645 vswap();
1646 --vtop;
1647 return;
1650 if (!rev)
1651 vswap();
1654 gv2(RC_INT, RC_INT);
1655 assert(vtop[-1].r < VT_CONST && vtop[0].r < VT_CONST);
1656 a = intr(vtop[-1].r);
1657 b = intr(vtop[0].r);
1658 vtop -= 2;
1659 x = get_reg(RC_INT);
1660 ++vtop;
1661 vtop[0].r = x;
1662 x = intr(x);
1664 switch (op) {
1665 case '%':
1666 // Use x30 for quotient:
1667 o(0x1ac00c00 | l << 31 | 30 | a << 5 | b << 16); // sdiv
1668 o(0x1b008000 | l << 31 | x | (uint32_t)30 << 5 |
1669 b << 16 | a << 10); // msub
1670 break;
1671 case '&':
1672 o(0x0a000000 | l << 31 | x | a << 5 | b << 16); // and
1673 break;
1674 case '*':
1675 o(0x1b007c00 | l << 31 | x | a << 5 | b << 16); // mul
1676 break;
1677 case '+':
1678 o(0x0b000000 | l << 31 | x | a << 5 | b << 16); // add
1679 break;
1680 case '-':
1681 o(0x4b000000 | l << 31 | x | a << 5 | b << 16); // sub
1682 break;
1683 case '/':
1684 o(0x1ac00c00 | l << 31 | x | a << 5 | b << 16); // sdiv
1685 break;
1686 case '^':
1687 o(0x4a000000 | l << 31 | x | a << 5 | b << 16); // eor
1688 break;
1689 case '|':
1690 o(0x2a000000 | l << 31 | x | a << 5 | b << 16); // orr
1691 break;
1692 case TOK_EQ:
1693 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1694 o(0x1a9f17e0 | x); // cset wA,eq
1695 break;
1696 case TOK_GE:
1697 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1698 o(0x1a9fb7e0 | x); // cset wA,ge
1699 break;
1700 case TOK_GT:
1701 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1702 o(0x1a9fd7e0 | x); // cset wA,gt
1703 break;
1704 case TOK_LE:
1705 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1706 o(0x1a9fc7e0 | x); // cset wA,le
1707 break;
1708 case TOK_LT:
1709 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1710 o(0x1a9fa7e0 | x); // cset wA,lt
1711 break;
1712 case TOK_NE:
1713 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1714 o(0x1a9f07e0 | x); // cset wA,ne
1715 break;
1716 case TOK_SAR:
1717 o(0x1ac02800 | l << 31 | x | a << 5 | b << 16); // asr
1718 break;
1719 case TOK_SHL:
1720 o(0x1ac02000 | l << 31 | x | a << 5 | b << 16); // lsl
1721 break;
1722 case TOK_SHR:
1723 o(0x1ac02400 | l << 31 | x | a << 5 | b << 16); // lsr
1724 break;
1725 case TOK_UDIV:
1726 case TOK_PDIV:
1727 o(0x1ac00800 | l << 31 | x | a << 5 | b << 16); // udiv
1728 break;
1729 case TOK_UGE:
1730 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1731 o(0x1a9f37e0 | x); // cset wA,cs
1732 break;
1733 case TOK_UGT:
1734 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1735 o(0x1a9f97e0 | x); // cset wA,hi
1736 break;
1737 case TOK_ULT:
1738 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1739 o(0x1a9f27e0 | x); // cset wA,cc
1740 break;
1741 case TOK_ULE:
1742 o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
1743 o(0x1a9f87e0 | x); // cset wA,ls
1744 break;
1745 case TOK_UMOD:
1746 // Use x30 for quotient:
1747 o(0x1ac00800 | l << 31 | 30 | a << 5 | b << 16); // udiv
1748 o(0x1b008000 | l << 31 | x | (uint32_t)30 << 5 |
1749 b << 16 | a << 10); // msub
1750 break;
1751 default:
1752 assert(0);
1756 ST_FUNC void gen_opi(int op)
1758 arm64_gen_opil(op, 0);
1759 arm64_vset_VT_CMP(op);
1762 ST_FUNC void gen_opl(int op)
1764 arm64_gen_opil(op, 1);
1765 arm64_vset_VT_CMP(op);
1768 ST_FUNC void gen_opf(int op)
1770 uint32_t x, a, b, dbl;
1772 if (vtop[0].type.t == VT_LDOUBLE) {
1773 CType type = vtop[0].type;
1774 int func = 0;
1775 int cond = -1;
1776 switch (op) {
1777 case '*': func = TOK___multf3; break;
1778 case '+': func = TOK___addtf3; break;
1779 case '-': func = TOK___subtf3; break;
1780 case '/': func = TOK___divtf3; break;
1781 case TOK_EQ: func = TOK___eqtf2; cond = 1; break;
1782 case TOK_NE: func = TOK___netf2; cond = 0; break;
1783 case TOK_LT: func = TOK___lttf2; cond = 10; break;
1784 case TOK_GE: func = TOK___getf2; cond = 11; break;
1785 case TOK_LE: func = TOK___letf2; cond = 12; break;
1786 case TOK_GT: func = TOK___gttf2; cond = 13; break;
1787 default: assert(0); break;
1789 vpush_helper_func(func);
1790 vrott(3);
1791 gfunc_call(2);
1792 vpushi(0);
1793 vtop->r = cond < 0 ? REG_FRET : REG_IRET;
1794 if (cond < 0)
1795 vtop->type = type;
1796 else {
1797 o(0x7100001f); // cmp w0,#0
1798 o(0x1a9f07e0 | (uint32_t)cond << 12); // cset w0,(cond)
1800 return;
1803 dbl = vtop[0].type.t != VT_FLOAT;
1804 gv2(RC_FLOAT, RC_FLOAT);
1805 assert(vtop[-1].r < VT_CONST && vtop[0].r < VT_CONST);
1806 a = fltr(vtop[-1].r);
1807 b = fltr(vtop[0].r);
1808 vtop -= 2;
1809 switch (op) {
1810 case TOK_EQ: case TOK_NE:
1811 case TOK_LT: case TOK_GE: case TOK_LE: case TOK_GT:
1812 x = get_reg(RC_INT);
1813 ++vtop;
1814 vtop[0].r = x;
1815 x = intr(x);
1816 break;
1817 default:
1818 x = get_reg(RC_FLOAT);
1819 ++vtop;
1820 vtop[0].r = x;
1821 x = fltr(x);
1822 break;
1825 switch (op) {
1826 case '*':
1827 o(0x1e200800 | dbl << 22 | x | a << 5 | b << 16); // fmul
1828 break;
1829 case '+':
1830 o(0x1e202800 | dbl << 22 | x | a << 5 | b << 16); // fadd
1831 break;
1832 case '-':
1833 o(0x1e203800 | dbl << 22 | x | a << 5 | b << 16); // fsub
1834 break;
1835 case '/':
1836 o(0x1e201800 | dbl << 22 | x | a << 5 | b << 16); // fdiv
1837 break;
1838 case TOK_EQ:
1839 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1840 o(0x1a9f17e0 | x); // cset w(x),eq
1841 break;
1842 case TOK_GE:
1843 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1844 o(0x1a9fb7e0 | x); // cset w(x),ge
1845 break;
1846 case TOK_GT:
1847 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1848 o(0x1a9fd7e0 | x); // cset w(x),gt
1849 break;
1850 case TOK_LE:
1851 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1852 o(0x1a9f87e0 | x); // cset w(x),ls
1853 break;
1854 case TOK_LT:
1855 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1856 o(0x1a9f57e0 | x); // cset w(x),mi
1857 break;
1858 case TOK_NE:
1859 o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
1860 o(0x1a9f07e0 | x); // cset w(x),ne
1861 break;
1862 default:
1863 assert(0);
1865 arm64_vset_VT_CMP(op);
1868 // Generate sign extension from 32 to 64 bits:
1869 ST_FUNC void gen_cvt_sxtw(void)
1871 uint32_t r = intr(gv(RC_INT));
1872 o(0x93407c00 | r | r << 5); // sxtw x(r),w(r)
1875 /* char/short to int conversion */
1876 ST_FUNC void gen_cvt_csti(int t)
1878 int r = intr(gv(RC_INT));
1879 o(0x13001c00
1880 | ((t & VT_BTYPE) == VT_SHORT) << 13
1881 | (uint32_t)!!(t & VT_UNSIGNED) << 30
1882 | r | r << 5); // [su]xt[bh] w(r),w(r)
1885 ST_FUNC void gen_cvt_itof(int t)
1887 if (t == VT_LDOUBLE) {
1888 int f = vtop->type.t;
1889 int func = (f & VT_BTYPE) == VT_LLONG ?
1890 (f & VT_UNSIGNED ? TOK___floatunditf : TOK___floatditf) :
1891 (f & VT_UNSIGNED ? TOK___floatunsitf : TOK___floatsitf);
1892 vpush_helper_func(func);
1893 vrott(2);
1894 gfunc_call(1);
1895 vpushi(0);
1896 vtop->type.t = t;
1897 vtop->r = REG_FRET;
1898 return;
1900 else {
1901 int d, n = intr(gv(RC_INT));
1902 int s = !(vtop->type.t & VT_UNSIGNED);
1903 uint32_t l = ((vtop->type.t & VT_BTYPE) == VT_LLONG);
1904 --vtop;
1905 d = get_reg(RC_FLOAT);
1906 ++vtop;
1907 vtop[0].r = d;
1908 o(0x1e220000 | (uint32_t)!s << 16 |
1909 (uint32_t)(t != VT_FLOAT) << 22 | fltr(d) |
1910 l << 31 | n << 5); // [us]cvtf [sd](d),[wx](n)
1914 ST_FUNC void gen_cvt_ftoi(int t)
1916 if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
1917 int func = (t & VT_BTYPE) == VT_LLONG ?
1918 (t & VT_UNSIGNED ? TOK___fixunstfdi : TOK___fixtfdi) :
1919 (t & VT_UNSIGNED ? TOK___fixunstfsi : TOK___fixtfsi);
1920 vpush_helper_func(func);
1921 vrott(2);
1922 gfunc_call(1);
1923 vpushi(0);
1924 vtop->type.t = t;
1925 vtop->r = REG_IRET;
1926 return;
1928 else {
1929 int d, n = fltr(gv(RC_FLOAT));
1930 uint32_t l = ((vtop->type.t & VT_BTYPE) != VT_FLOAT);
1931 --vtop;
1932 d = get_reg(RC_INT);
1933 ++vtop;
1934 vtop[0].r = d;
1935 o(0x1e380000 |
1936 (uint32_t)!!(t & VT_UNSIGNED) << 16 |
1937 (uint32_t)((t & VT_BTYPE) == VT_LLONG) << 31 | intr(d) |
1938 l << 22 | n << 5); // fcvtz[su] [wx](d),[sd](n)
1942 ST_FUNC void gen_cvt_ftof(int t)
1944 int f = vtop[0].type.t & VT_BTYPE;
1945 assert(t == VT_FLOAT || t == VT_DOUBLE || t == VT_LDOUBLE);
1946 assert(f == VT_FLOAT || f == VT_DOUBLE || f == VT_LDOUBLE);
1947 if (t == f)
1948 return;
1950 if (t == VT_LDOUBLE || f == VT_LDOUBLE) {
1951 int func = (t == VT_LDOUBLE) ?
1952 (f == VT_FLOAT ? TOK___extendsftf2 : TOK___extenddftf2) :
1953 (t == VT_FLOAT ? TOK___trunctfsf2 : TOK___trunctfdf2);
1954 vpush_helper_func(func);
1955 vrott(2);
1956 gfunc_call(1);
1957 vpushi(0);
1958 vtop->type.t = t;
1959 vtop->r = REG_FRET;
1961 else {
1962 int x, a;
1963 gv(RC_FLOAT);
1964 assert(vtop[0].r < VT_CONST);
1965 a = fltr(vtop[0].r);
1966 --vtop;
1967 x = get_reg(RC_FLOAT);
1968 ++vtop;
1969 vtop[0].r = x;
1970 x = fltr(x);
1972 if (f == VT_FLOAT)
1973 o(0x1e22c000 | x | a << 5); // fcvt d(x),s(a)
1974 else
1975 o(0x1e624000 | x | a << 5); // fcvt s(x),d(a)
1979 ST_FUNC void ggoto(void)
1981 arm64_gen_bl_or_b(1);
1982 --vtop;
1985 ST_FUNC void gen_clear_cache(void)
1987 uint32_t beg, end, dsz, isz, p, lab1, b1;
1988 gv2(RC_INT, RC_INT);
1989 vpushi(0);
1990 vtop->r = get_reg(RC_INT);
1991 vpushi(0);
1992 vtop->r = get_reg(RC_INT);
1993 vpushi(0);
1994 vtop->r = get_reg(RC_INT);
1995 beg = intr(vtop[-4].r); // x0
1996 end = intr(vtop[-3].r); // x1
1997 dsz = intr(vtop[-2].r); // x2
1998 isz = intr(vtop[-1].r); // x3
1999 p = intr(vtop[0].r); // x4
2000 vtop -= 5;
2002 o(0xd53b0020 | isz); // mrs x(isz),ctr_el0
2003 o(0x52800080 | p); // mov w(p),#4
2004 o(0x53104c00 | dsz | isz << 5); // ubfx w(dsz),w(isz),#16,#4
2005 o(0x1ac02000 | dsz | p << 5 | dsz << 16); // lsl w(dsz),w(p),w(dsz)
2006 o(0x12000c00 | isz | isz << 5); // and w(isz),w(isz),#15
2007 o(0x1ac02000 | isz | p << 5 | isz << 16); // lsl w(isz),w(p),w(isz)
2008 o(0x51000400 | p | dsz << 5); // sub w(p),w(dsz),#1
2009 o(0x8a240004 | p | beg << 5 | p << 16); // bic x(p),x(beg),x(p)
2010 b1 = ind; o(0x14000000); // b
2011 lab1 = ind;
2012 o(0xd50b7b20 | p); // dc cvau,x(p)
2013 o(0x8b000000 | p | p << 5 | dsz << 16); // add x(p),x(p),x(dsz)
2014 write32le(cur_text_section->data + b1, 0x14000000 | (ind - b1) >> 2);
2015 o(0xeb00001f | p << 5 | end << 16); // cmp x(p),x(end)
2016 o(0x54ffffa3 | ((lab1 - ind) << 3 & 0xffffe0)); // b.cc lab1
2017 o(0xd5033b9f); // dsb ish
2018 o(0x51000400 | p | isz << 5); // sub w(p),w(isz),#1
2019 o(0x8a240004 | p | beg << 5 | p << 16); // bic x(p),x(beg),x(p)
2020 b1 = ind; o(0x14000000); // b
2021 lab1 = ind;
2022 o(0xd50b7520 | p); // ic ivau,x(p)
2023 o(0x8b000000 | p | p << 5 | isz << 16); // add x(p),x(p),x(isz)
2024 write32le(cur_text_section->data + b1, 0x14000000 | (ind - b1) >> 2);
2025 o(0xeb00001f | p << 5 | end << 16); // cmp x(p),x(end)
2026 o(0x54ffffa3 | ((lab1 - ind) << 3 & 0xffffe0)); // b.cc lab1
2027 o(0xd5033b9f); // dsb ish
2028 o(0xd5033fdf); // isb
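/* gen_clear_cache() above emits an inline equivalent of __clear_cache():
 * it reads CTR_EL0 to obtain the data- and instruction-cache line sizes,
 * cleans the data cache by VA ("dc cvau") over [beg, end), then invalidates
 * the instruction cache ("ic ivau") over the same range, with "dsb ish" and
 * "isb" barriers in between. */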
2031 ST_FUNC void gen_vla_sp_save(int addr) {
2032 uint32_t r = intr(get_reg(RC_INT));
2033 o(0x910003e0 | r); // mov x(r),sp
2034 arm64_strx(3, r, 29, addr);
2037 ST_FUNC void gen_vla_sp_restore(int addr) {
2038 // Use x30 because this function can be called when there
2039 // is a live return value in x0 but there is nothing on
2040 // the value stack to prevent get_reg from returning x0.
2041 uint32_t r = 30;
2042 arm64_ldrx(0, 3, r, 29, addr);
2043 o(0x9100001f | r << 5); // mov sp,x(r)
2046 ST_FUNC void gen_vla_alloc(CType *type, int align) {
2047 uint32_t r;
2048 #if defined(CONFIG_TCC_BCHECK)
2049 if (tcc_state->do_bounds_check)
2050 vpushv(vtop);
2051 #endif
2052 r = intr(gv(RC_INT));
2053 #if defined(CONFIG_TCC_BCHECK)
2054 if (tcc_state->do_bounds_check)
2055 o(0x91004000 | r | r << 5); // add x(r),x(r),#15+1
2056 else
2057 #endif
2058 o(0x91003c00 | r | r << 5); // add x(r),x(r),#15
2059 o(0x927cec00 | r | r << 5); // bic x(r),x(r),#15
2060 o(0xcb2063ff | r << 16); // sub sp,sp,x(r)
2061 vpop();
2062 #if defined(CONFIG_TCC_BCHECK)
2063 if (tcc_state->do_bounds_check) {
2064 vpushi(0);
2065 vtop->r = TREG_R(0);
2066 o(0x910003e0 | vtop->r); // mov r0,sp
2067 vswap();
2068 vpush_helper_func(TOK___bound_new_region);
2069 vrott(3);
2070 gfunc_call(2);
2071 func_bound_add_epilog = 1;
2073 #endif
2076 /* end of A64 code generator */
2077 /*************************************************************/
2078 #endif
2079 /*************************************************************/