Add arm64 (AArch64) as a target architecture.
[tinycc.git] / arm64-gen.c
/*
 * A64 code generator for TCC
 *
 * Copyright (c) 2014-2015 Edmund Grimley Evans
 *
 * Copying and distribution of this file, with or without modification,
 * are permitted in any medium without royalty provided the copyright
 * notice and this notice are preserved. This file is offered as-is,
 * without any warranty.
 */

#ifdef TARGET_DEFS_ONLY

// Number of registers available to allocator:
#define NB_REGS 28 // x0-x18, x30, v0-v7

#define TREG_R(x) (x) // x = 0..18
#define TREG_R30 19
#define TREG_F(x) (x + 20) // x = 0..7

// Register classes sorted from more general to more precise:
#define RC_INT (1 << 0)
#define RC_FLOAT (1 << 1)
#define RC_R(x) (1 << (2 + (x))) // x = 0..18
#define RC_R30 (1 << 21)
#define RC_F(x) (1 << (22 + (x))) // x = 0..7

#define RC_IRET (RC_R(0)) // int return register class
#define RC_FRET (RC_F(0)) // float return register class

#define REG_IRET (TREG_R(0)) // int return register number
#define REG_FRET (TREG_F(0)) // float return register number

#define PTR_SIZE 8

#define LDOUBLE_SIZE 16
#define LDOUBLE_ALIGN 16

#define MAX_ALIGN 16

#define CHAR_IS_UNSIGNED

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_AARCH64

#define R_DATA_32 R_AARCH64_ABS32
#define R_DATA_PTR R_AARCH64_ABS64
#define R_JMP_SLOT R_AARCH64_JUMP_SLOT
#define R_COPY R_AARCH64_COPY

#define ELF_START_ADDR 0x00400000
#define ELF_PAGE_SIZE 0x1000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/

#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    RC_INT | RC_R(0),
    RC_INT | RC_R(1),
    RC_INT | RC_R(2),
    RC_INT | RC_R(3),
    RC_INT | RC_R(4),
    RC_INT | RC_R(5),
    RC_INT | RC_R(6),
    RC_INT | RC_R(7),
    RC_INT | RC_R(8),
    RC_INT | RC_R(9),
    RC_INT | RC_R(10),
    RC_INT | RC_R(11),
    RC_INT | RC_R(12),
    RC_INT | RC_R(13),
    RC_INT | RC_R(14),
    RC_INT | RC_R(15),
    RC_INT | RC_R(16),
    RC_INT | RC_R(17),
    RC_INT | RC_R(18),
    RC_R30, // not in RC_INT as we make special use of x30
    RC_FLOAT | RC_F(0),
    RC_FLOAT | RC_F(1),
    RC_FLOAT | RC_F(2),
    RC_FLOAT | RC_F(3),
    RC_FLOAT | RC_F(4),
    RC_FLOAT | RC_F(5),
    RC_FLOAT | RC_F(6),
    RC_FLOAT | RC_F(7)
};

#define IS_FREG(x) ((x) >= TREG_F(0))

static uint32_t intr(int r)
{
    assert(TREG_R(0) <= r && r <= TREG_R30);
    return r < TREG_R30 ? r : 30;
}

static uint32_t fltr(int r)
{
    assert(TREG_F(0) <= r && r <= TREG_F(7));
    return r - TREG_F(0);
}

// Add an instruction to text section:
ST_FUNC void o(unsigned int c)
{
    int ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(uint32_t *)(cur_text_section->data + ind) = c;
    ind = ind1;
}

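// Encode a 64-bit value as an AArch64 "bitmask immediate" for the
// logical instructions, returning the 13-bit N:immr:imms field, or -1
// if the value is not representable. Such immediates are a rotated run
// of ones within an element of 2, 4, 8, 16, 32 or 64 bits, replicated
// across the register. By way of illustration (not in the original
// source): x = 0x5555555555555555 replicates the 2-bit element 0b01,
// so rep = 2, len = 1, pos = 0, encoding as N = 0, immr = 0,
// imms = 0b111100.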
static int arm64_encode_bimm64(uint64_t x)
{
    int neg = x & 1;
    int rep, pos, len;

    if (neg)
        x = ~x;
    if (!x)
        return -1;

    if (x >> 2 == (x & (((uint64_t)1 << (64 - 2)) - 1)))
        rep = 2, x &= ((uint64_t)1 << 2) - 1;
    else if (x >> 4 == (x & (((uint64_t)1 << (64 - 4)) - 1)))
        rep = 4, x &= ((uint64_t)1 << 4) - 1;
    else if (x >> 8 == (x & (((uint64_t)1 << (64 - 8)) - 1)))
        rep = 8, x &= ((uint64_t)1 << 8) - 1;
    else if (x >> 16 == (x & (((uint64_t)1 << (64 - 16)) - 1)))
        rep = 16, x &= ((uint64_t)1 << 16) - 1;
    else if (x >> 32 == (x & (((uint64_t)1 << (64 - 32)) - 1)))
        rep = 32, x &= ((uint64_t)1 << 32) - 1;
    else
        rep = 64;

    pos = 0;
    if (!(x & (((uint64_t)1 << 32) - 1))) x >>= 32, pos += 32;
    if (!(x & (((uint64_t)1 << 16) - 1))) x >>= 16, pos += 16;
    if (!(x & (((uint64_t)1 << 8) - 1))) x >>= 8, pos += 8;
    if (!(x & (((uint64_t)1 << 4) - 1))) x >>= 4, pos += 4;
    if (!(x & (((uint64_t)1 << 2) - 1))) x >>= 2, pos += 2;
    if (!(x & (((uint64_t)1 << 1) - 1))) x >>= 1, pos += 1;

    len = 0;
    if (!(~x & (((uint64_t)1 << 32) - 1))) x >>= 32, len += 32;
    if (!(~x & (((uint64_t)1 << 16) - 1))) x >>= 16, len += 16;
    if (!(~x & (((uint64_t)1 << 8) - 1))) x >>= 8, len += 8;
    if (!(~x & (((uint64_t)1 << 4) - 1))) x >>= 4, len += 4;
    if (!(~x & (((uint64_t)1 << 2) - 1))) x >>= 2, len += 2;
    if (!(~x & (((uint64_t)1 << 1) - 1))) x >>= 1, len += 1;

    if (x)
        return -1;
    if (neg) {
        pos = (pos + len) & (rep - 1);
        len = rep - len;
    }
    return ((0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) |
            ((rep - pos) & (rep - 1)) << 6 | (len - 1));
}

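// Try to synthesize x in register r with a single instruction: movz or
// movn when all but one 16-bit chunk is 0x0000 or 0xffff, otherwise a
// mov alias of orr with a bitmask immediate. Returns the instruction
// word, or 0 if no single instruction suffices.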
static uint32_t arm64_movi(int r, uint64_t x)
{
    uint64_t m = 0xffff;
    int e;
    if (!(x & ~m))
        return 0x52800000 | r | x << 5; // movz w(r),#(x)
    if (!(x & ~(m << 16)))
        return 0x52a00000 | r | x >> 11; // movz w(r),#(x >> 16),lsl #16
    if (!(x & ~(m << 32)))
        return 0xd2c00000 | r | x >> 27; // movz x(r),#(x >> 32),lsl #32
    if (!(x & ~(m << 48)))
        return 0xd2e00000 | r | x >> 43; // movz x(r),#(x >> 48),lsl #48
    if ((x & ~m) == m << 16)
        return (0x12800000 | r |
                (~x << 5 & 0x1fffe0)); // movn w(r),#(~x)
    if ((x & ~(m << 16)) == m)
        return (0x12a00000 | r |
                (~x >> 11 & 0x1fffe0)); // movn w(r),#(~x >> 16),lsl #16
    if (!~(x | m))
        return (0x92800000 | r |
                (~x << 5 & 0x1fffe0)); // movn x(r),#(~x)
    if (!~(x | m << 16))
        return (0x92a00000 | r |
                (~x >> 11 & 0x1fffe0)); // movn x(r),#(~x >> 16),lsl #16
    if (!~(x | m << 32))
        return (0x92c00000 | r |
                (~x >> 27 & 0x1fffe0)); // movn x(r),#(~x >> 32),lsl #32
    if (!~(x | m << 48))
        return (0x92e00000 | r |
                (~x >> 43 & 0x1fffe0)); // movn x(r),#(~x >> 48),lsl #48
    if (!(x >> 32) && (e = arm64_encode_bimm64(x | x << 32)) >= 0)
        return 0x320003e0 | r | (uint32_t)e << 10; // mov w(r),#(x)
    if ((e = arm64_encode_bimm64(x)) >= 0)
        return 0xb20003e0 | r | (uint32_t)e << 10; // mov x(r),#(x)
    return 0;
}

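// Materialize an arbitrary 64-bit constant in register r, falling back
// to a movz/movk sequence when arm64_movi finds no single instruction.
// For illustration, arm64_movimm(0, 0x12345678) should emit roughly:
//     movz w0,#0x5678
//     movk x0,#0x1234,lsl #16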
static void arm64_movimm(int r, uint64_t x)
{
    uint32_t i;
    if ((i = arm64_movi(r, x)))
        o(i);
    else {
        // This could be improved:
        o(0x52800000 | r | (x & 0xffff) << 5); // movz w(r),#(x & 0xffff)
        for (i = 1; i < 4; i++)
            if (x >> 16 * i & 0xffff) {
                o(0xf2800000 | r | (x >> 16 * i & 0xffff) << 5 | i << 21);
                // movk x(r),#(*),lsl #(*)
            }
    }
}

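// The forward-branch list is threaded through the code itself: each
// unresolved branch slot holds the offset of the previous slot (stored
// there by gjmp below), so gsym_addr can walk the chain and overwrite
// each slot with a real branch, or a nop when the branch would target
// the very next instruction.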
// Patch all branches in list pointed to by t to branch to a:
ST_FUNC void gsym_addr(int t_, int a_)
{
    uint32_t t = t_;
    uint32_t a = a_;
    while (t) {
        uint32_t *ptr = (uint32_t *)(cur_text_section->data + t);
        uint32_t next = *ptr;
        if (a - t + 0x8000000 >= 0x10000000)
            tcc_error("branch out of range");
        *ptr = (a - t == 4 ? 0xd503201f : // nop
                0x14000000 | ((a - t) >> 2 & 0x3ffffff)); // b
        t = next;
    }
}

// Patch all branches in list pointed to by t to branch to current location:
ST_FUNC void gsym(int t)
{
    gsym_addr(t, ind);
}

// Return the log2 of the size of a basic type:
static int arm64_type_size(int t)
{
    switch (t & VT_BTYPE) {
    case VT_INT: return 2;
    case VT_BYTE: return 0;
    case VT_SHORT: return 1;
    case VT_PTR: return 3;
    case VT_ENUM: return 2;
    case VT_FUNC: return 3;
    case VT_FLOAT: return 2;
    case VT_DOUBLE: return 3;
    case VT_LDOUBLE: return 4;
    case VT_BOOL: return 0;
    case VT_LLONG: return 3;
    }
    assert(0);
    return 0;
}

static void gen_stack_addr(int reg, uint64_t off)
{
    arm64_movimm(30, off); // use x30 for offset
    o(0x8b3e63e0 | reg); // add x(reg),sp,x30
}

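// The load/store helpers below choose between the three scalar
// addressing forms: a scaled unsigned 12-bit immediate offset, the
// unscaled signed 9-bit form, and a register-offset form with the
// offset materialized in x30 when neither immediate fits.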
static void gen_load(int sg, int sz, int dst, int bas, uint64_t off)
{
    if (sz >= 2)
        sg = 0;
    if (!(off & ~(0xfff << sz)))
        o(0x39400000 | dst | bas << 5 | off << (10 - sz) |
          !!sg << 23 | sz << 30);
    else if (off < 256 || -off <= 256)
        o(0x38400000 | dst | bas << 5 | (off & 511) << 12 |
          !!sg << 23 | sz << 30);
    else {
        arm64_movimm(30, off); // use x30 for offset
        o(0x38206800 | dst | bas << 5 | 30 << 16 |
          (!!sg + 1) << 22 | sz << 30);
    }
}

static void gen_fload(int sz, int dst, int bas, uint64_t off)
{
    if (!(off & ~(0xfff << sz)))
        o(0x3d400000 | dst | bas << 5 | off << (10 - sz) |
          (sz & 4) << 21 | (sz & 3) << 30);
    else if (off < 256 || -off <= 256)
        o(0x3c400000 | dst | bas << 5 | (off & 511) << 12 |
          (sz & 4) << 21 | (sz & 3) << 30);
    else {
        arm64_movimm(30, off); // use x30 for offset
        o(0x3c606800 | dst | bas << 5 | 30 << 16 | sz << 30 | (sz & 4) << 21);
    }
}

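// Load a struct of 1..16 bytes into x(reg) (and x(reg+1) for sizes
// above 8), taking the address from x(reg) itself and using x30 as
// scratch so that partial loads can be merged with orr/lsr.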
static void gen_sload(int reg, int size)
{
    // Use x30 for intermediate value in some cases.
    switch (size) {
    default: assert(0); break;
    case 1:
        gen_load(0, 0, reg, reg, 0);
        break;
    case 2:
        gen_load(0, 1, reg, reg, 0);
        break;
    case 3:
        gen_load(0, 1, 30, reg, 0);
        gen_load(0, 0, reg, reg, 2);
        o(0x2a0043c0 | reg | reg << 16); // orr w(reg),w30,w(reg),lsl #16
        break;
    case 4:
        gen_load(0, 2, reg, reg, 0);
        break;
    case 5:
        gen_load(0, 2, 30, reg, 0);
        gen_load(0, 0, reg, reg, 4);
        o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
        break;
    case 6:
        gen_load(0, 2, 30, reg, 0);
        gen_load(0, 1, reg, reg, 4);
        o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
        break;
    case 7:
        gen_load(0, 2, 30, reg, 0);
        gen_load(0, 2, reg, reg, 3);
        o(0x53087c00 | reg | reg << 5); // lsr w(reg), w(reg), #8
        o(0xaa0083c0 | reg | reg << 16); // orr x(reg),x30,x(reg),lsl #32
        break;
    case 8:
        gen_load(0, 3, reg, reg, 0);
        break;
    case 9:
        gen_load(0, 0, reg + 1, reg, 8);
        gen_load(0, 3, reg, reg, 0);
        break;
    case 10:
        gen_load(0, 1, reg + 1, reg, 8);
        gen_load(0, 3, reg, reg, 0);
        break;
    case 11:
        gen_load(0, 2, reg + 1, reg, 7);
        o(0x53087c00 | (reg+1) | (reg+1) << 5); // lsr w(reg+1), w(reg+1), #8
        gen_load(0, 3, reg, reg, 0);
        break;
    case 12:
        gen_load(0, 2, reg + 1, reg, 8);
        gen_load(0, 3, reg, reg, 0);
        break;
    case 13:
        gen_load(0, 3, reg + 1, reg, 5);
        o(0xd358fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #24
        gen_load(0, 3, reg, reg, 0);
        break;
    case 14:
        gen_load(0, 3, reg + 1, reg, 6);
        o(0xd350fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #16
        gen_load(0, 3, reg, reg, 0);
        break;
    case 15:
        gen_load(0, 3, reg + 1, reg, 7);
        o(0xd348fc00 | (reg+1) | (reg+1) << 5); // lsr x(reg+1), x(reg+1), #8
        gen_load(0, 3, reg, reg, 0);
        break;
    case 16:
        o(0xa9400000 | reg | (reg+1) << 10 | reg << 5);
        // ldp x(reg),x(reg+1),[x(reg)]
        break;
    }
}

static void gen_store(int sz, int dst, int bas, uint64_t off)
{
    if (!(off & ~(0xfff << sz)))
        o(0x39000000 | dst | bas << 5 | off << (10 - sz) | sz << 30);
    else if (off < 256 || -off <= 256)
        o(0x38000000 | dst | bas << 5 | (off & 511) << 12 | sz << 30);
    else {
        arm64_movimm(30, off); // use x30 for offset
        o(0x38206800 | dst | bas << 5 | 30 << 16 | sz << 30);
    }
}

static void gen_fstore(int sz, int dst, int bas, uint64_t off)
{
    if (!(off & ~(0xfff << sz)))
        o(0x3d000000 | dst | bas << 5 | off << (10 - sz) |
          (sz & 4) << 21 | (sz & 3) << 30);
    else if (off < 256 || -off <= 256)
        o(0x3c000000 | dst | bas << 5 | (off & 511) << 12 |
          (sz & 4) << 21 | (sz & 3) << 30);
    else {
        arm64_movimm(30, off); // use x30 for offset
        o(0x3c206800 | dst | bas << 5 | 30 << 16 | sz << 30 | (sz & 4) << 21);
    }
}

static void gen_addr(int r, Sym *sym, unsigned long addend)
{
#if 0
    // This is normally the right way to do it, I think,
    // but it does not work with "-run" when stdin or stderr is
    // used by the program: "R_AARCH64_ADR_PREL_PG_HI21 relocation failed".
    greloca(cur_text_section, sym, ind, R_AARCH64_ADR_PREL_PG_HI21, addend);
    o(0x90000000 | r);
    greloca(cur_text_section, sym, ind, R_AARCH64_ADD_ABS_LO12_NC, addend);
    o(0x91000000 | r | r << 5);
#else
    // This seems to work in all cases, unless you try to use an old buggy
    // GCC for linking, which says: "unresolvable R_AARCH64_MOVW_UABS_G0_NC
    // relocation against symbol `stderr@@GLIBC_2.17'".
    greloca(cur_text_section, sym, ind, R_AARCH64_MOVW_UABS_G0_NC, addend);
    o(0xf2800000 | r); // movk x(rt),#...,lsl #0
    greloca(cur_text_section, sym, ind, R_AARCH64_MOVW_UABS_G1_NC, addend);
    o(0xf2a00000 | r); // movk x(rt),#...,lsl #16
    greloca(cur_text_section, sym, ind, R_AARCH64_MOVW_UABS_G2_NC, addend);
    o(0xf2c00000 | r); // movk x(rt),#...,lsl #32
    greloca(cur_text_section, sym, ind, R_AARCH64_MOVW_UABS_G3, addend);
    o(0xf2e00000 | r); // movk x(rt),#...,lsl #48
#endif
}

ST_FUNC void load(int r, SValue *sv)
{
    int svtt = sv->type.t;
    int svr = sv->r & ~VT_LVAL_TYPE;
    int svrv = svr & VT_VALMASK;
    uint64_t svcul = (int32_t)sv->c.ul;

    if (svr == (VT_LOCAL | VT_LVAL)) {
        if (IS_FREG(r))
            gen_fload(arm64_type_size(svtt), fltr(r), 29, svcul);
        else
            gen_load(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
                     intr(r), 29, svcul);
        return;
    }

    if ((svr & ~VT_VALMASK) == VT_LVAL && svrv < VT_CONST) {
        if (IS_FREG(r))
            gen_fload(arm64_type_size(svtt),
                      fltr(r), intr(svrv), 0);
        else
            gen_load(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
                     intr(r), intr(svrv), 0);
        return;
    }

    if (svr == (VT_CONST | VT_LVAL | VT_SYM)) {
        gen_addr(30, sv->sym, svcul); // use x30 for address
        if (IS_FREG(r))
            gen_fload(arm64_type_size(svtt), fltr(r), 30, 0);
        else
            gen_load(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
                     intr(r), 30, 0);
        return;
    }

    if (svr == (VT_CONST | VT_SYM)) {
        gen_addr(intr(r), sv->sym, svcul);
        return;
    }

    if (svr == VT_CONST) {
        if ((svtt & VT_BTYPE) != VT_VOID)
            arm64_movimm(intr(r),
                         arm64_type_size(svtt) == 3 ? sv->c.ull : svcul);
        return;
    }

    if (svr < VT_CONST) {
        if (IS_FREG(r) && IS_FREG(svr))
            if (svtt == VT_LDOUBLE)
                o(0x4ea01c00 | fltr(r) | fltr(svr) << 5);
                    // mov v(r).16b,v(svr).16b
            else
                o(0x1e604000 | fltr(r) | fltr(svr) << 5); // fmov d(r),d(svr)
        else if (!IS_FREG(r) && !IS_FREG(svr))
            o(0xaa0003e0 | intr(r) | intr(svr) << 16); // mov x(r),x(svr)
        else
            assert(0);
        return;
    }

    if (svr == VT_LOCAL) {
        if (-svcul < 0x1000)
            o(0xd10003a0 | intr(r) | -svcul << 10); // sub x(r),x29,#...
        else {
            arm64_movimm(30, -svcul); // use x30 for offset
            o(0xcb0003a0 | intr(r) | 30 << 16); // sub x(r),x29,x30
        }
        return;
    }

    if (svr == VT_JMP || svr == VT_JMPI) {
        int t = (svr == VT_JMPI);
        arm64_movimm(intr(r), t);
        o(0x14000002); // b .+8
        gsym(svcul);
        arm64_movimm(intr(r), t ^ 1);
        return;
    }

    if (svr == (VT_LLOCAL | VT_LVAL)) {
        gen_load(0, 3, 30, 29, svcul); // use x30 for offset
        if (IS_FREG(r))
            gen_fload(arm64_type_size(svtt), fltr(r), 30, 0);
        else
            gen_load(!(svtt & VT_UNSIGNED), arm64_type_size(svtt),
                     intr(r), 30, 0);
        return;
    }

    printf("load(%x, (%x, %x, %llx))\n", r, svtt, sv->r, (long long)svcul);
    assert(0);
}

ST_FUNC void store(int r, SValue *sv)
{
    int svtt = sv->type.t;
    int svr = sv->r & ~VT_LVAL_TYPE;
    int svrv = svr & VT_VALMASK;
    uint64_t svcul = (int32_t)sv->c.ul;

    if (svr == (VT_LOCAL | VT_LVAL)) {
        if (IS_FREG(r))
            gen_fstore(arm64_type_size(svtt), fltr(r), 29, svcul);
        else
            gen_store(arm64_type_size(svtt), intr(r), 29, svcul);
        return;
    }

    if ((svr & ~VT_VALMASK) == VT_LVAL && svrv < VT_CONST) {
        if (IS_FREG(r))
            gen_fstore(arm64_type_size(svtt), fltr(r), intr(svrv), 0);
        else
            gen_store(arm64_type_size(svtt), intr(r), intr(svrv), 0);
        return;
    }

    if (svr == (VT_CONST | VT_LVAL | VT_SYM)) {
        gen_addr(30, sv->sym, svcul); // use x30 for address
        if (IS_FREG(r))
            gen_fstore(arm64_type_size(svtt), fltr(r), 30, 0);
        else
            gen_store(arm64_type_size(svtt), intr(r), 30, 0);
        return;
    }

    printf("store(%x, (%x, %x, %llx))\n", r, svtt, sv->r, (long long)svcul);
    assert(0);
}

static void arm64_gen_bl_or_b(int b)
{
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        assert(!b);
        if (vtop->r & VT_SYM)
            greloc(cur_text_section, vtop->sym, ind, R_AARCH64_CALL26);
        else
            assert(0);
        o(0x94000000); // bl .
    }
    else
        o(0xd61f0000 | !b << 21 | intr(gv(RC_R30)) << 5); // br/blr
}

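// Homogeneous floating-point aggregate (HFA) detection, as defined by
// AAPCS64: an aggregate of up to four members that all have the same
// floating-point type is passed in consecutive SIMD registers. For
// example, struct { float x, y; } is an HFA with n = 2 and fsize = 4.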
static int arm64_hfa_aux(CType *type, int *fsize, int num)
{
    if (is_float(type->t)) {
        int a, n = type_size(type, &a);
        if (num >= 4 || (*fsize && *fsize != n))
            return -1;
        *fsize = n;
        return num + 1;
    }
    else if ((type->t & VT_BTYPE) == VT_STRUCT) {
        int is_struct = 0; // rather than union
        Sym *field;
        for (field = type->ref->next; field; field = field->next)
            if (field->c) {
                is_struct = 1;
                break;
            }
        if (is_struct) {
            int num0 = num;
            for (field = type->ref->next; field; field = field->next) {
                if (field->c != (num - num0) * *fsize)
                    return -1;
                num = arm64_hfa_aux(&field->type, fsize, num);
                if (num == -1)
                    return -1;
            }
            if (type->ref->c != (num - num0) * *fsize)
                return -1;
            return num;
        }
        else { // union
            int num0 = num;
            for (field = type->ref->next; field; field = field->next) {
                int num1 = arm64_hfa_aux(&field->type, fsize, num0);
                if (num1 == -1)
                    return -1;
                num = num1 < num ? num : num1;
            }
            if (type->ref->c != (num - num0) * *fsize)
                return -1;
            return num;
        }
    }
    else if (type->t & VT_ARRAY) {
        int num1;
        if (!type->ref->c)
            return num;
        num1 = arm64_hfa_aux(&type->ref->type, fsize, num);
        if (num1 == -1 || (num1 != num && type->ref->c > 4))
            return -1;
        num1 = num + type->ref->c * (num1 - num);
        if (num1 > 4)
            return -1;
        return num1;
    }
    return -1;
}

static int arm64_hfa(CType *type, int *fsize)
{
    if ((type->t & VT_BTYPE) == VT_STRUCT || (type->t & VT_ARRAY)) {
        int sz = 0;
        int n = arm64_hfa_aux(type, &sz, 0);
        if (0 < n && n <= 4) {
            if (fsize)
                *fsize = sz;
            return n;
        }
    }
    return 0;
}

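// Classify arguments following the AAPCS64 marshalling rules; the B.2,
// C.1, ... comments below name the correspondingly numbered stages of
// the "Procedure Call Standard for the ARM 64-bit Architecture". The
// codes stored in a[] (decoded by the debug printout in arm64_pcs):
// below 16, x-register a[i]/2, with bit 0 set when the argument is
// replaced by a pointer; 16..31, v-register a[i]/2 - 8; 32 and above,
// stack offset a[i] - 32, again with bit 0 flagging a pointer.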
static unsigned long arm64_pcs_aux(int n, CType **type, unsigned long *a)
{
    int nx = 0; // next integer register
    int nv = 0; // next vector register
    unsigned long ns = 32; // next stack offset
    int i;

    for (i = 0; i < n; i++) {
        int hfa = arm64_hfa(type[i], 0);
        int size, align;

        if ((type[i]->t & VT_ARRAY) ||
            (type[i]->t & VT_BTYPE) == VT_FUNC)
            size = align = 8;
        else
            size = type_size(type[i], &align);

        if (hfa)
            // B.2
            ;
        else if (size > 16) {
            // B.3: replace with pointer
            if (nx < 8)
                a[i] = nx++ << 1 | 1;
            else {
                ns = (ns + 7) & ~7;
                a[i] = ns | 1;
                ns += 8;
            }
            continue;
        }
        else if ((type[i]->t & VT_BTYPE) == VT_STRUCT)
            // B.4
            size = (size + 7) & ~7;

        // C.1
        if (is_float(type[i]->t) && nv < 8) {
            a[i] = 16 + (nv++ << 1);
            continue;
        }

        // C.2
        if (hfa && nv + hfa <= 8) {
            a[i] = 16 + (nv << 1);
            nv += hfa;
            continue;
        }

        // C.3
        if (hfa) {
            nv = 8;
            size = (size + 7) & ~7;
        }

        // C.4
        if (hfa || (type[i]->t & VT_BTYPE) == VT_LDOUBLE) {
            ns = (ns + 7) & ~7;
            ns = (ns + align - 1) & -align;
        }

        // C.5
        if ((type[i]->t & VT_BTYPE) == VT_FLOAT)
            size = 8;

        // C.6
        if (hfa || is_float(type[i]->t)) {
            a[i] = ns;
            ns += size;
            continue;
        }

        // C.7
        if ((type[i]->t & VT_BTYPE) != VT_STRUCT && size <= 8 && nx < 8) {
            a[i] = nx++ << 1;
            continue;
        }

        // C.8
        if (align == 16)
            nx = (nx + 1) & ~1;

        // C.9
        if ((type[i]->t & VT_BTYPE) != VT_STRUCT && size == 16 && nx < 7) {
            a[i] = nx << 1;
            nx += 2;
            continue;
        }

        // C.10
        if ((type[i]->t & VT_BTYPE) == VT_STRUCT && size <= (8 - nx) * 8) {
            a[i] = nx << 1;
            nx += (size + 7) >> 3;
            continue;
        }

        // C.11
        nx = 8;

        // C.12
        ns = (ns + 7) & ~7;
        ns = (ns + align - 1) & -align;

        // C.13
        if ((type[i]->t & VT_BTYPE) == VT_STRUCT) {
            a[i] = ns;
            ns += size;
            continue;
        }

        // C.14
        if (size < 8)
            size = 8;

        // C.15
        a[i] = ns;
        ns += size;
    }

    return ns - 32;
}

static unsigned long arm64_pcs(int n, CType **type, unsigned long *a)
{
    unsigned long stack;

    // Return type:
    if ((type[0]->t & VT_BTYPE) == VT_VOID)
        a[0] = -1;
    else {
        arm64_pcs_aux(1, type, a);
        assert(a[0] == 0 || a[0] == 1 || a[0] == 16);
    }

    // Argument types:
    stack = arm64_pcs_aux(n, type + 1, a + 1);

    if (0) {
        int i;
        for (i = 0; i <= n; i++) {
            if (!i)
                printf("arm64_pcs return: ");
            else
                printf("arm64_pcs arg %d: ", i);
            if (a[i] == (unsigned long)-1)
                printf("void\n");
            else if (a[i] == 1 && !i)
                printf("X8 pointer\n");
            else if (a[i] < 16)
                printf("X%lu%s\n", a[i] / 2, a[i] & 1 ? " pointer" : "");
            else if (a[i] < 32)
                printf("V%lu\n", a[i] / 2 - 8);
            else
                printf("stack %lu%s\n",
                       (a[i] - 32) & ~1, a[i] & 1 ? " pointer" : "");
        }
    }

    return stack;
}

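// Generate a function call: classify arguments with arm64_pcs, reserve
// stack space for stacked arguments and for copies of structs passed
// by pointer, then fill in the values in two passes (stack values
// first, register values second) before emitting the bl/blr and
// popping the stack.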
ST_FUNC void gfunc_call(int nb_args)
{
    CType *return_type;
    CType **t;
    unsigned long *a, *a1;
    unsigned long stack;
    int i;

    return_type = &vtop[-nb_args].type.ref->type;
    if ((return_type->t & VT_BTYPE) == VT_STRUCT)
        --nb_args;

    t = tcc_malloc((nb_args + 1) * sizeof(*t));
    a = tcc_malloc((nb_args + 1) * sizeof(*a));
    a1 = tcc_malloc((nb_args + 1) * sizeof(*a1));

    t[0] = return_type;
    for (i = 0; i < nb_args; i++)
        t[nb_args - i] = &vtop[-i].type;

    stack = arm64_pcs(nb_args, t, a);

    // Allocate space for structs replaced by pointer:
    for (i = nb_args; i; i--)
        if (a[i] & 1) {
            SValue *arg = &vtop[i - nb_args];
            int align, size = type_size(&arg->type, &align);
            assert((arg->type.t & VT_BTYPE) == VT_STRUCT);
            stack = (stack + align - 1) & -align;
            a1[i] = stack;
            stack += size;
        }

    stack = (stack + 15) >> 4 << 4;

    assert(stack < 0x1000);
    if (stack)
        o(0xd10003ff | stack << 10); // sub sp,sp,#(n)

    // First pass: set all values on stack
    for (i = nb_args; i; i--) {
        vpushv(vtop - nb_args + i);

        if (a[i] & 1) {
            // struct replaced by pointer
            int r = get_reg(RC_INT);
            gen_stack_addr(intr(r), a1[i]);
            vset(&vtop->type, r | VT_LVAL, 0);
            vswap();
            vstore();
            if (a[i] >= 32) {
                // pointer on stack
                r = get_reg(RC_INT);
                gen_stack_addr(intr(r), a1[i]);
                gen_store(3, intr(r), 31, (a[i] - 32) >> 1 << 1);
            }
        }
        else if (a[i] >= 32) {
            // value on stack
            if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
                int r = get_reg(RC_INT);
                gen_stack_addr(intr(r), a[i] - 32);
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
            }
            else if (is_float(vtop->type.t)) {
                gv(RC_FLOAT);
                gen_fstore(arm64_type_size(vtop[0].type.t),
                           fltr(vtop[0].r), 31, a[i] - 32);
            }
            else {
                gv(RC_INT);
                gen_store(arm64_type_size(vtop[0].type.t),
                          intr(vtop[0].r), 31, a[i] - 32);
            }
        }

        --vtop;
    }

    // Second pass: assign values to registers
    for (i = nb_args; i; i--, vtop--) {
        if (a[i] < 16 && !(a[i] & 1)) {
            // value in general-purpose registers
            if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
                int align, size = type_size(&vtop->type, &align);
                vtop->type.t = VT_PTR;
                gaddrof();
                gv(RC_R(a[i] / 2));
                gen_sload(a[i] / 2, size);
            }
            else
                gv(RC_R(a[i] / 2));
        }
        else if (a[i] < 16)
            // struct replaced by pointer in register
            gen_stack_addr(a[i] / 2, a1[i]);
        else if (a[i] < 32) {
            // value in floating-point registers
            if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
                int j, sz, n = arm64_hfa(&vtop->type, &sz);
                vtop->type.t = VT_PTR;
                gaddrof();
                gv(RC_R30);
                for (j = 0; j < n; j++)
                    o(0x3d4003c0 |
                      (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
                      (a[i] / 2 - 8 + j) |
                      j << 10); // ldr ([sdq])(*),[x30,#(j * sz)]
            }
            else
                gv(RC_F(a[i] / 2 - 8));
        }
    }

    if ((return_type->t & VT_BTYPE) == VT_STRUCT) {
        if (a[0] == 1) {
            // indirect return: set x8 and discard the stack value
            gv(RC_R(8));
            --vtop;
        }
        else
            // return in registers: keep the address for after the call
            vswap();
    }

    save_regs(0);
    arm64_gen_bl_or_b(0);
    --vtop;
    if (stack)
        o(0x910003ff | stack << 10); // add sp,sp,#(n)

    {
        int rt = return_type->t;
        int bt = rt & VT_BTYPE;
        if (bt == VT_BYTE || bt == VT_SHORT)
            // Promote small integers:
            o(0x13001c00 | (bt == VT_SHORT) << 13 |
              !!(rt & VT_UNSIGNED) << 30); // [su]xt[bh] w0,w0
        else if (bt == VT_STRUCT && !(a[0] & 1)) {
            // A struct was returned in registers, so write it out:
            gv(RC_R(8));
            --vtop;
            if (a[0] == 0) {
                int align, size = type_size(return_type, &align);
                assert(size <= 16);
                if (size > 8)
                    o(0xa9000500); // stp x0,x1,[x8]
                else if (size)
                    gen_store(size > 4 ? 3 : size > 2 ? 2 : size > 1,
                              0, 8, 0);
            }
            else if (a[0] == 16) {
                int j, sz, n = arm64_hfa(return_type, &sz);
                for (j = 0; j < n; j++)
                    o(0x3d000100 |
                      (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
                      (a[i] / 2 - 8 + j) |
                      j << 10); // str ([sdq])(*),[x8,#(j * sz)]
            }
        }
    }

    tcc_free(a1);
    tcc_free(a);
    tcc_free(t);
}

static unsigned long arm64_func_va_list_stack;
static int arm64_func_va_list_gr_offs;
static int arm64_func_va_list_vr_offs;
static int arm64_func_sub_sp_offset;

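// The prologue below saves a fixed 224-byte register save area at the
// frame pointer so that va_start/va_arg can find named and anonymous
// register arguments at known offsets:
//   [sp,#0]   x29,x30   [sp,#16]  q0-q7
//   [sp,#144] x8        [sp,#160] x0-x7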
ST_FUNC void gfunc_prolog(CType *func_type)
{
    int n = 0;
    int i = 0;
    Sym *sym;
    CType **t;
    unsigned long *a;

    // Why doesn't the caller (gen_function) set func_vt?
    func_vt = func_type->ref->type;
    func_vc = 144; // offset of where x8 is stored

    for (sym = func_type->ref; sym; sym = sym->next)
        ++n;
    t = tcc_malloc(n * sizeof(*t));
    a = tcc_malloc(n * sizeof(*a));

    for (sym = func_type->ref; sym; sym = sym->next)
        t[i++] = &sym->type;

    arm64_func_va_list_stack = arm64_pcs(n - 1, t, a);

    o(0xa9b27bfd); // stp x29,x30,[sp,#-224]!
    o(0xad0087e0); // stp q0,q1,[sp,#16]
    o(0xad018fe2); // stp q2,q3,[sp,#48]
    o(0xad0297e4); // stp q4,q5,[sp,#80]
    o(0xad039fe6); // stp q6,q7,[sp,#112]
    o(0xa90923e8); // stp x8,x8,[sp,#144]
    o(0xa90a07e0); // stp x0,x1,[sp,#160]
    o(0xa90b0fe2); // stp x2,x3,[sp,#176]
    o(0xa90c17e4); // stp x4,x5,[sp,#192]
    o(0xa90d1fe6); // stp x6,x7,[sp,#208]

    arm64_func_va_list_gr_offs = -64;
    arm64_func_va_list_vr_offs = -128;

    for (i = 1, sym = func_type->ref->next; sym; i++, sym = sym->next) {
        int off = (a[i] < 16 ? 160 + a[i] / 2 * 8 :
                   a[i] < 32 ? 16 + (a[i] - 16) / 2 * 16 :
                   224 + ((a[i] - 32) >> 1 << 1));
        sym_push(sym->v & ~SYM_FIELD, &sym->type,
                 (a[i] & 1 ? VT_LLOCAL : VT_LOCAL) | lvalue_type(sym->type.t),
                 off);

        if (a[i] < 16) {
            int align, size = type_size(&sym->type, &align);
            arm64_func_va_list_gr_offs = (a[i] / 2 - 7 +
                                          (!(a[i] & 1) && size > 8)) * 8;
        }
        else if (a[i] < 32) {
            int hfa = arm64_hfa(&sym->type, 0);
            arm64_func_va_list_vr_offs = (a[i] / 2 - 16 +
                                          (hfa ? hfa : 1)) * 16;
        }

        // HFAs of float and double need to be written differently:
        if (16 <= a[i] && a[i] < 32 && (sym->type.t & VT_BTYPE) == VT_STRUCT) {
            int j, sz, k = arm64_hfa(&sym->type, &sz);
            if (sz < 16)
                for (j = 0; j < k; j++) {
                    o(0x3d0003e0 | -(sz & 8) << 27 | (sz & 4) << 29 |
                      ((a[i] - 16) / 2 + j) | (off / sz + j) << 10);
                    // str ([sdq])(*),[sp,#(j * sz)]
                }
        }
    }

    tcc_free(a);
    tcc_free(t);

    o(0x910003fd); // mov x29,sp
    arm64_func_sub_sp_offset = ind;
    // In gfunc_epilog these will be replaced with code to decrement SP:
    o(0xd503201f); // nop
    o(0xd503201f); // nop
    loc = 0;
}

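// On AArch64, va_list is the five-field structure defined by AAPCS64,
//   struct { void *__stack, *__gr_top, *__vr_top;
//            int __gr_offs, __vr_offs; };
// with fields at offsets 0, 8, 16, 24 and 28; the stores below fill it
// in from the register save area laid out in gfunc_prolog.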
ST_FUNC void gen_va_start(void)
{
    int r;
    --vtop; // we don't need the "arg"
    gaddrof();
    r = intr(gv(RC_INT));

    if (arm64_func_va_list_stack) {
        //xx could use add (immediate) here
        arm64_movimm(30, arm64_func_va_list_stack + 224);
        o(0x8b1e03be); // add x30,x29,x30
    }
    else
        o(0x910383be); // add x30,x29,#224
    o(0xf900001e | r << 5); // str x30,[x(r)]

    if (arm64_func_va_list_gr_offs) {
        if (arm64_func_va_list_stack)
            o(0x910383be); // add x30,x29,#224
        o(0xf900041e | r << 5); // str x30,[x(r),#8]
    }

    if (arm64_func_va_list_vr_offs) {
        o(0x910243be); // add x30,x29,#144
        o(0xf900081e | r << 5); // str x30,[x(r),#16]
    }

    arm64_movimm(30, arm64_func_va_list_gr_offs);
    o(0xb900181e | r << 5); // str w30,[x(r),#24]

    arm64_movimm(30, arm64_func_va_list_vr_offs);
    o(0xb9001c1e | r << 5); // str w30,[x(r),#28]

    --vtop;
}

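// Emit the AAPCS64 va_arg sequence: a negative __gr_offs/__vr_offs
// means registers remain in the save area, so the generated code tries
// the register save area first and falls back to __stack, updating
// whichever cursor it consumed from.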
ST_FUNC void gen_va_arg(CType *t)
{
    int align, size = type_size(t, &align);
    int fsize, hfa = arm64_hfa(t, &fsize);
    int r0, r1;

    if (is_float(t->t)) {
        hfa = 1;
        fsize = size;
    }

    gaddrof();
    r0 = intr(gv(RC_INT));
    r1 = get_reg(RC_INT);
    vtop[0].r = r1 | lvalue_type(t->t);
    r1 = intr(r1);

    if (!hfa) {
        uint32_t n = size > 16 ? 8 : (size + 7) & -8;
        if (size == 16 && align == 16)
            tcc_error("va_arg(ap, __uint128_t) unimplemented");
        o(0xb940181e | r0 << 5); // ldr w30,[x(r0),#24] // __gr_offs
        o(0x310003c0 | r1 | n << 10); // adds w(r1),w30,#(n)
        o(0x540000ad); // b.le .+20
        o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
        o(0x9100001e | r1 << 5 | n << 10); // add x30,x(r1),#(n)
        o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
        o(0x14000004); // b .+16
        o(0xb9001800 | r1 | r0 << 5); // str w(r1),[x(r0),#24] // __gr_offs
        o(0xf9400400 | r1 | r0 << 5); // ldr x(r1),[x(r0),#8] // __gr_top
        o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
        if (size > 16)
            o(0xf9400000 | r1 | r1 << 5); // ldr x(r1),[x(r1)]
    }
    else {
        uint32_t rsz = hfa << 4;
        uint32_t ssz = (size + 7) & -(uint32_t)8;
        uint32_t b1;
        if (hfa > 1 && fsize < 16)
            // We may need to change the layout of this HFA
            tcc_error("va_arg(ap, HFA) unimplemented");
        o(0xb9401c1e | r0 << 5); // ldr w30,[x(r0),#28] // __vr_offs
        o(0x310003c0 | r1 | rsz << 10); // adds w(r1),w30,#(rsz)
        b1 = ind; o(0x5400000d); // b.le lab1
        o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
        if (fsize == 16) {
            o(0x91003c00 | r1 | r1 << 5); // add x(r1),x(r1),#15
            o(0x927cec00 | r1 | r1 << 5); // and x(r1),x(r1),#-16
        }
        o(0x9100001e | r1 << 5 | ssz << 10); // add x30,x(r1),#(ssz)
        o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
        o(0x14000004); // b .+16
        // lab1:
        *(uint32_t *)(cur_text_section->data + b1) =
            (0x5400000d | (ind - b1) << 3);
        o(0xb9001c00 | r1 | r0 << 5); // str w(r1),[x(r0),#28] // __vr_offs
        o(0xf9400800 | r1 | r0 << 5); // ldr x(r1),[x(r0),#16] // __vr_top
        o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
    }
}

ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *align)
{
    return 0;
}

ST_FUNC void greturn(void)
{
    CType *t = &func_vt;
    unsigned long a;

    arm64_pcs(0, &t, &a);
    switch (a) {
    case -1:
        break;
    case 0:
        if ((func_vt.t & VT_BTYPE) == VT_STRUCT) {
            int align, size = type_size(&func_vt, &align);
            gaddrof();
            gv(RC_R(0));
            gen_sload(0, size);
        }
        else
            gv(RC_IRET);
        break;
    case 1: {
        CType type = func_vt;
        mk_pointer(&type);
        vset(&type, VT_LOCAL | VT_LVAL, func_vc);
        indir();
        vswap();
        vstore();
        break;
    }
    case 16:
        if ((func_vt.t & VT_BTYPE) == VT_STRUCT) {
            int j, sz, n = arm64_hfa(&vtop->type, &sz);
            gaddrof();
            gv(RC_R(0));
            for (j = 0; j < n; j++)
                o(0x3d400000 |
                  (sz & 16) << 19 | -(sz & 8) << 27 | (sz & 4) << 29 |
                  j | j << 10); // ldr ([sdq])(*),[x0,#(j * sz)]
        }
        else
            gv(RC_FRET);
        break;
    default:
        assert(0);
    }
}

ST_FUNC void gfunc_epilog(void)
{
    if (loc) {
        // Insert instructions to subtract size of stack frame from SP.
        uint32_t *ptr =
            (uint32_t *)(cur_text_section->data + arm64_func_sub_sp_offset);
        uint64_t diff = (-loc + 15) & ~15;
        if (!(diff >> 24)) {
            if (diff & 0xfff) // sub sp,sp,#(diff & 0xfff)
                ptr[0] = 0xd10003ff | (diff & 0xfff) << 10;
            if (diff >> 12) // sub sp,sp,#(diff >> 12),lsl #12
                ptr[1] = 0xd14003ff | (diff >> 12) << 10;
        }
        else {
            // In this case we may subtract more than necessary,
            // but always less than 17/16 of what we were aiming for.
            int i = 0;
            int j = 0;
            while (diff >> 20) {
                diff = (diff + 0xffff) >> 16;
                ++i;
            }
            while (diff >> 16) {
                diff = (diff + 1) >> 1;
                ++j;
            }
            ptr[0] = 0xd2800010 | diff << 5 | i << 21;
            // mov x16,#(diff),lsl #(16 * i)
            ptr[1] = 0xcb3063ff | j << 10;
            // sub sp,sp,x16,lsl #(j)
        }
    }

    o(0x910003bf); // mov sp,x29
    o(0xa8ce7bfd); // ldp x29,x30,[sp],#224

    o(0xd65f03c0); // ret
}

// Generate forward branch to label:
ST_FUNC int gjmp(int t)
{
    int r = ind;
    o(t);
    return r;
}

// Generate branch to known address:
ST_FUNC void gjmp_addr(int a)
{
    assert(a - ind + 0x8000000 < 0x10000000);
    o(0x14000000 | ((a - ind) >> 2 & 0x3ffffff));
}

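// Branch on the truth value of vtop. For long double (IEEE binary128)
// the value is tested in an integer register pair; or-ing in the high
// half shifted left by one discards the sign bit, presumably so that
// -0.0 tests as zero like +0.0.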
ST_FUNC int gtst(int inv, int t)
{
    int bt = vtop->type.t & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        int a, b, f = fltr(gv(RC_FLOAT));
        a = get_reg(RC_INT);
        vpushi(0);
        vtop[0].r = a;
        b = get_reg(RC_INT);
        a = intr(a);
        b = intr(b);
        o(0x4e083c00 | a | f << 5); // mov x(a),v(f).d[0]
        o(0x4e183c00 | b | f << 5); // mov x(b),v(f).d[1]
        o(0xaa000400 | a | a << 5 | b << 16); // orr x(a),x(a),x(b),lsl #1
        o(0xb4000040 | a | !!inv << 24); // cbz/cbnz x(a),.+8
        --vtop;
    }
    else if (bt == VT_FLOAT || bt == VT_DOUBLE) {
        int a = fltr(gv(RC_FLOAT));
        o(0x1e202008 | a << 5 | (bt != VT_FLOAT) << 22); // fcmp
        o(0x54000040 | !!inv); // b.eq/b.ne .+8
    }
    else {
        int ll = (bt == VT_PTR || bt == VT_LLONG);
        int a = intr(gv(RC_INT));
        o(0x34000040 | a | !!inv << 24 | ll << 31); // cbz/cbnz wA,.+8
    }
    --vtop;
    return gjmp(t);
}

static void arm64_gen_opil(int op, int l)
{
    int x, a, b;
    gv2(RC_INT, RC_INT);
    assert(vtop[-1].r < VT_CONST && vtop[0].r < VT_CONST);
    a = intr(vtop[-1].r);
    b = intr(vtop[0].r);
    vtop -= 2;
    x = get_reg(RC_INT);
    ++vtop;
    vtop[0].r = x;
    x = intr(x);

    switch (op) {
    case '%':
        // Use x30 for quotient:
        o(0x1ac00c00 | l << 31 | 30 | a << 5 | b << 16); // sdiv
        o(0x1b008000 | l << 31 | x | 30 << 5 | b << 16 | a << 10); // msub
        break;
    case '&':
        o(0x0a000000 | l << 31 | x | a << 5 | b << 16); // and
        break;
    case '*':
        o(0x1b007c00 | l << 31 | x | a << 5 | b << 16); // mul
        break;
    case '+':
        o(0x0b000000 | l << 31 | x | a << 5 | b << 16); // add
        break;
    case '-':
        o(0x4b000000 | l << 31 | x | a << 5 | b << 16); // sub
        break;
    case '/':
        o(0x1ac00c00 | l << 31 | x | a << 5 | b << 16); // sdiv
        break;
    case '^':
        o(0x4a000000 | l << 31 | x | a << 5 | b << 16); // eor
        break;
    case '|':
        o(0x2a000000 | l << 31 | x | a << 5 | b << 16); // orr
        break;
    case TOK_EQ:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9f17e0 | x); // cset wA,eq
        break;
    case TOK_GE:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9fb7e0 | x); // cset wA,ge
        break;
    case TOK_GT:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9fd7e0 | x); // cset wA,gt
        break;
    case TOK_LE:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9fc7e0 | x); // cset wA,le
        break;
    case TOK_LT:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9fa7e0 | x); // cset wA,lt
        break;
    case TOK_NE:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9f07e0 | x); // cset wA,ne
        break;
    case TOK_SAR:
        o(0x1ac02800 | l << 31 | x | a << 5 | b << 16); // asr
        break;
    case TOK_SHL:
        o(0x1ac02000 | l << 31 | x | a << 5 | b << 16); // lsl
        break;
    case TOK_SHR:
        o(0x1ac02400 | l << 31 | x | a << 5 | b << 16); // lsr
        break;
    case TOK_UDIV:
    case TOK_PDIV:
        o(0x1ac00800 | l << 31 | x | a << 5 | b << 16); // udiv
        break;
    case TOK_UGE:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9f37e0 | x); // cset wA,cs
        break;
    case TOK_UGT:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9f97e0 | x); // cset wA,hi
        break;
    case TOK_ULT:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9f27e0 | x); // cset wA,cc
        break;
    case TOK_ULE:
        o(0x6b00001f | l << 31 | a << 5 | b << 16); // cmp
        o(0x1a9f87e0 | x); // cset wA,ls
        break;
    case TOK_UMOD:
        // Use x30 for quotient:
        o(0x1ac00800 | l << 31 | 30 | a << 5 | b << 16); // udiv
        o(0x1b008000 | l << 31 | x | 30 << 5 | b << 16 | a << 10); // msub
        break;
    default:
        assert(0);
    }
}

ST_FUNC void gen_opi(int op)
{
    arm64_gen_opil(op, 0);
}

ST_FUNC void gen_opl(int op)
{
    arm64_gen_opil(op, 1);
}

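// Floating-point operations. long double is IEEE binary128, for which
// there are no scalar AArch64 instructions, so those operations are
// routed through the soft-float helpers (__addtf3, __eqtf2, ...).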
ST_FUNC void gen_opf(int op)
{
    int x, a, b, dbl;

    if (vtop[0].type.t == VT_LDOUBLE) {
        CType type = vtop[0].type;
        int func = 0;
        int cond = -1;
        switch (op) {
        case '*': func = TOK___multf3; break;
        case '+': func = TOK___addtf3; break;
        case '-': func = TOK___subtf3; break;
        case '/': func = TOK___divtf3; break;
        case TOK_EQ: func = TOK___eqtf2; cond = 1; break;
        case TOK_NE: func = TOK___netf2; cond = 0; break;
        case TOK_LT: func = TOK___lttf2; cond = 10; break;
        case TOK_GE: func = TOK___getf2; cond = 11; break;
        case TOK_LE: func = TOK___letf2; cond = 12; break;
        case TOK_GT: func = TOK___gttf2; cond = 13; break;
        default: assert(0); break;
        }
        vpush_global_sym(&func_old_type, func);
        vrott(3);
        gfunc_call(2);
        vpushi(0);
        vtop->r = cond < 0 ? REG_FRET : REG_IRET;
        if (cond < 0)
            vtop->type = type;
        else {
            o(0x7100001f); // cmp w0,#0
            o(0x1a9f07e0 | cond << 12); // cset w0,(cond)
        }
        return;
    }

    dbl = vtop[0].type.t != VT_FLOAT;
    gv2(RC_FLOAT, RC_FLOAT);
    assert(vtop[-1].r < VT_CONST && vtop[0].r < VT_CONST);
    a = fltr(vtop[-1].r);
    b = fltr(vtop[0].r);
    vtop -= 2;
    switch (op) {
    case TOK_EQ: case TOK_NE:
    case TOK_LT: case TOK_GE: case TOK_LE: case TOK_GT:
        x = get_reg(RC_INT);
        ++vtop;
        vtop[0].r = x;
        x = intr(x);
        break;
    default:
        x = get_reg(RC_FLOAT);
        ++vtop;
        vtop[0].r = x;
        x = fltr(x);
        break;
    }

    switch (op) {
    case '*':
        o(0x1e200800 | dbl << 22 | x | a << 5 | b << 16); // fmul
        break;
    case '+':
        o(0x1e202800 | dbl << 22 | x | a << 5 | b << 16); // fadd
        break;
    case '-':
        o(0x1e203800 | dbl << 22 | x | a << 5 | b << 16); // fsub
        break;
    case '/':
        o(0x1e201800 | dbl << 22 | x | a << 5 | b << 16); // fdiv
        break;
    case TOK_EQ:
        o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
        o(0x1a9f17e0 | x); // cset w(x),eq
        break;
    case TOK_GE:
        o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
        o(0x1a9fb7e0 | x); // cset w(x),ge
        break;
    case TOK_GT:
        o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
        o(0x1a9fd7e0 | x); // cset w(x),gt
        break;
    case TOK_LE:
        o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
        o(0x1a9f87e0 | x); // cset w(x),ls
        break;
    case TOK_LT:
        o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
        o(0x1a9f57e0 | x); // cset w(x),mi
        break;
    case TOK_NE:
        o(0x1e202000 | dbl << 22 | a << 5 | b << 16); // fcmp
        o(0x1a9f07e0 | x); // cset w(x),ne
        break;
    default:
        assert(0);
    }
}

// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = intr(gv(RC_INT));
    o(0x93407c00 | r | r << 5); // sxtw x(r),w(r)
}

ST_FUNC void gen_cvt_itof(int t)
{
    if (t == VT_LDOUBLE) {
        int f = vtop->type.t;
        int func = (f & VT_BTYPE) == VT_LLONG ?
                   (f & VT_UNSIGNED ? TOK___floatunditf : TOK___floatditf) :
                   (f & VT_UNSIGNED ? TOK___floatunsitf : TOK___floatsitf);
        vpush_global_sym(&func_old_type, func);
        vrott(2);
        gfunc_call(1);
        vpushi(0);
        vtop->type.t = t;
        vtop->r = REG_FRET;
        return;
    }
    else {
        int d, n = intr(gv(RC_INT));
        int s = !(vtop->type.t & VT_UNSIGNED);
        int l = ((vtop->type.t & VT_BTYPE) == VT_LLONG);
        --vtop;
        d = get_reg(RC_FLOAT);
        ++vtop;
        vtop[0].r = d;
        o(0x1e220000 | !s << 16 | (t != VT_FLOAT) << 22 | fltr(d) |
          l << 31 | n << 5); // [us]cvtf [sd](d),[wx](n)
    }
}

ST_FUNC void gen_cvt_ftoi(int t)
{
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        int func = (t & VT_BTYPE) == VT_LLONG ?
                   (t & VT_UNSIGNED ? TOK___fixunstfdi : TOK___fixtfdi) :
                   (t & VT_UNSIGNED ? TOK___fixunstfsi : TOK___fixtfsi);
        vpush_global_sym(&func_old_type, func);
        vrott(2);
        gfunc_call(1);
        vpushi(0);
        vtop->type.t = t;
        vtop->r = REG_IRET;
        return;
    }
    else {
        int d, n = fltr(gv(RC_FLOAT));
        int l = ((vtop->type.t & VT_BTYPE) != VT_FLOAT);
        --vtop;
        d = get_reg(RC_INT);
        ++vtop;
        vtop[0].r = d;
        o(0x1e380000 |
          !!(t & VT_UNSIGNED) << 16 |
          ((t & VT_BTYPE) == VT_LLONG) << 31 | intr(d) |
          l << 22 | n << 5); // fcvtz[su] [wx](d),[sd](n)
    }
}

ST_FUNC void gen_cvt_ftof(int t)
{
    int f = vtop[0].type.t;
    assert(t == VT_FLOAT || t == VT_DOUBLE || t == VT_LDOUBLE);
    assert(f == VT_FLOAT || f == VT_DOUBLE || f == VT_LDOUBLE);
    if (t == f)
        return;

    if (t == VT_LDOUBLE || f == VT_LDOUBLE) {
        int func = (t == VT_LDOUBLE) ?
            (f == VT_FLOAT ? TOK___extendsftf2 : TOK___extenddftf2) :
            (t == VT_FLOAT ? TOK___trunctfsf2 : TOK___trunctfdf2);
        vpush_global_sym(&func_old_type, func);
        vrott(2);
        gfunc_call(1);
        vpushi(0);
        vtop->type.t = t;
        vtop->r = REG_FRET;
    }
    else {
        int x, a;
        gv(RC_FLOAT);
        assert(vtop[0].r < VT_CONST);
        a = fltr(vtop[0].r);
        --vtop;
        x = get_reg(RC_FLOAT);
        ++vtop;
        vtop[0].r = x;
        x = fltr(x);

        if (f == VT_FLOAT)
            o(0x1e22c000 | x | a << 5); // fcvt d(x),s(a)
        else
            o(0x1e624000 | x | a << 5); // fcvt s(x),d(a)
    }
}

ST_FUNC void ggoto(void)
{
    arm64_gen_bl_or_b(1);
    --vtop;
}

ST_FUNC void gen_vla_sp_save(int addr) {
    tcc_error("variable length arrays unsupported for this target");
}

ST_FUNC void gen_vla_sp_restore(int addr) {
    tcc_error("variable length arrays unsupported for this target");
}

ST_FUNC void gen_vla_alloc(CType *type, int align) {
    tcc_error("variable length arrays unsupported for this target");
}

/* end of A64 code generator */
/*************************************************************/
#endif
/*************************************************************/