source/libs/luajit/LuaJIT-src/src/lj_ccall.c
/*
** FFI C call handling.
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/

#include "lj_obj.h"

#if LJ_HASFFI

#include "lj_gc.h"
#include "lj_err.h"
#include "lj_tab.h"
#include "lj_ctype.h"
#include "lj_cconv.h"
#include "lj_cdata.h"
#include "lj_ccall.h"
#include "lj_trace.h"

/* Target-specific handling of register arguments. */
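/*
** Note: the CCALL_HANDLE_* macros below are not standalone code. They are
** expanded inside ccall_set_args() and ccall_get_results() and operate on
** the local variables declared there: sz (argument size), n (number of
** GPR-sized slots), ngpr/nfpr/nsp (register and stack slot counters),
** maxgpr, dp (destination slot), rp (non-NULL = pass by reference) and the
** isfp/isva flags. The *ARG macros may 'goto done' once an argument has
** been placed in a register, or 'goto err_nyi' to bail out; the *RET macros
** set cc->retref when the result comes back through a hidden pointer.
*/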
#if LJ_TARGET_X86
/* -- x86 calling conventions --------------------------------------------- */

#if LJ_ABI_WIN

#define CCALL_HANDLE_STRUCTRET \
  /* Return structs bigger than 8 by reference (on stack only). */ \
  cc->retref = (sz > 8); \
  if (cc->retref) cc->stack[nsp++] = (GPRArg)dp;

#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET

#else

#if LJ_TARGET_OSX

#define CCALL_HANDLE_STRUCTRET \
  /* Return structs of size 1, 2, 4 or 8 in registers. */ \
  cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
  if (cc->retref) { \
    if (ngpr < maxgpr) \
      cc->gpr[ngpr++] = (GPRArg)dp; \
    else \
      cc->stack[nsp++] = (GPRArg)dp; \
  } else {  /* Struct with single FP field ends up in FPR. */ \
    cc->resx87 = ccall_classify_struct(cts, ctr); \
  }

#define CCALL_HANDLE_STRUCTRET2 \
  if (cc->resx87) sp = (uint8_t *)&cc->fpr[0]; \
  memcpy(dp, sp, ctr->size);

#else

#define CCALL_HANDLE_STRUCTRET \
  cc->retref = 1;  /* Return all structs by reference (in reg or on stack). */ \
  if (ngpr < maxgpr) \
    cc->gpr[ngpr++] = (GPRArg)dp; \
  else \
    cc->stack[nsp++] = (GPRArg)dp;

#endif

#define CCALL_HANDLE_COMPLEXRET \
  /* Return complex float in GPRs and complex double by reference. */ \
  cc->retref = (sz > 8); \
  if (cc->retref) { \
    if (ngpr < maxgpr) \
      cc->gpr[ngpr++] = (GPRArg)dp; \
    else \
      cc->stack[nsp++] = (GPRArg)dp; \
  }

#endif

#define CCALL_HANDLE_COMPLEXRET2 \
  if (!cc->retref) \
    *(int64_t *)dp = *(int64_t *)sp;  /* Copy complex float from GPRs. */

#define CCALL_HANDLE_STRUCTARG \
  ngpr = maxgpr;  /* Pass all structs by value on the stack. */

#define CCALL_HANDLE_COMPLEXARG \
  isfp = 1;  /* Pass complex by value on stack. */

#define CCALL_HANDLE_REGARG \
  if (!isfp) {  /* Only non-FP values may be passed in registers. */ \
    if (n > 1) {  /* Anything > 32 bit is passed on the stack. */ \
      if (!LJ_ABI_WIN) ngpr = maxgpr;  /* Prevent reordering. */ \
    } else if (ngpr + 1 <= maxgpr) { \
      dp = &cc->gpr[ngpr]; \
      ngpr += n; \
      goto done; \
    } \
  }
#elif LJ_TARGET_X64 && LJ_ABI_WIN
/* -- Windows/x64 calling conventions ------------------------------------- */

#define CCALL_HANDLE_STRUCTRET \
  /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \
  cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
  if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;

#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET

#define CCALL_HANDLE_COMPLEXRET2 \
  if (!cc->retref) \
    *(int64_t *)dp = *(int64_t *)sp;  /* Copy complex float from GPRs. */

#define CCALL_HANDLE_STRUCTARG \
  /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \
  if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \
    rp = cdataptr(lj_cdata_new(cts, did, sz)); \
    sz = CTSIZE_PTR;  /* Pass all other structs by reference. */ \
  }

#define CCALL_HANDLE_COMPLEXARG \
  /* Pass complex float in a GPR and complex double by reference. */ \
  if (sz != 2*sizeof(float)) { \
    rp = cdataptr(lj_cdata_new(cts, did, sz)); \
    sz = CTSIZE_PTR; \
  }

/* Windows/x64 argument registers are strictly positional (use ngpr). */
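/*
** Illustration of "positional": for a call f(double a, int b) this ABI puts
** a in XMM0 and b in RDX, leaving RCX and XMM1 unused, because each argument
** position owns one GPR slot and one FPR slot. The single ngpr counter below
** indexes both register files; nfpr merely records whether the most recent
** slot was filled as FP, which the vararg mirroring code checks later.
*/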
#define CCALL_HANDLE_REGARG \
  if (isfp) { \
    if (ngpr < maxgpr) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \
  } else { \
    if (ngpr < maxgpr) { dp = &cc->gpr[ngpr++]; goto done; } \
  }
#elif LJ_TARGET_X64
/* -- POSIX/x64 calling conventions --------------------------------------- */

#define CCALL_HANDLE_STRUCTRET \
  int rcl[2]; rcl[0] = rcl[1] = 0; \
  if (ccall_classify_struct(cts, ctr, rcl, 0)) { \
    cc->retref = 1;  /* Return struct by reference. */ \
    cc->gpr[ngpr++] = (GPRArg)dp; \
  } else { \
    cc->retref = 0;  /* Return small structs in registers. */ \
  }

#define CCALL_HANDLE_STRUCTRET2 \
  int rcl[2]; rcl[0] = rcl[1] = 0; \
  ccall_classify_struct(cts, ctr, rcl, 0); \
  ccall_struct_ret(cc, rcl, dp, ctr->size);

#define CCALL_HANDLE_COMPLEXRET \
  /* Complex values are returned in one or two FPRs. */ \
  cc->retref = 0;

#define CCALL_HANDLE_COMPLEXRET2 \
  if (ctr->size == 2*sizeof(float)) {  /* Copy complex float from FPR. */ \
    *(int64_t *)dp = cc->fpr[0].l[0]; \
  } else {  /* Copy non-contiguous complex double from FPRs. */ \
    ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \
    ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \
  }

#define CCALL_HANDLE_STRUCTARG \
  int rcl[2]; rcl[0] = rcl[1] = 0; \
  if (!ccall_classify_struct(cts, d, rcl, 0)) { \
    cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \
    if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \
    nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \
    continue; \
  }  /* Pass all other structs by value on stack. */

#define CCALL_HANDLE_COMPLEXARG \
  isfp = 2;  /* Pass complex in FPRs or on stack. Needs postprocessing. */

#define CCALL_HANDLE_REGARG \
  if (isfp) {  /* Try to pass argument in FPRs. */ \
    int n2 = ctype_isvector(d->info) ? 1 : n; \
    if (nfpr + n2 <= CCALL_NARG_FPR) { \
      dp = &cc->fpr[nfpr]; \
      nfpr += n2; \
      goto done; \
    } \
  } else {  /* Try to pass argument in GPRs. */ \
    /* Note that reordering is explicitly allowed in the x64 ABI. */ \
    if (n <= 2 && ngpr + n <= maxgpr) { \
      dp = &cc->gpr[ngpr]; \
      ngpr += n; \
      goto done; \
    } \
  }
#elif LJ_TARGET_ARM
/* -- ARM calling conventions --------------------------------------------- */

#if LJ_ABI_SOFTFP

#define CCALL_HANDLE_STRUCTRET \
  /* Return structs of size <= 4 in a GPR. */ \
  cc->retref = !(sz <= 4); \
  if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;

#define CCALL_HANDLE_COMPLEXRET \
  cc->retref = 1;  /* Return all complex values by reference. */ \
  cc->gpr[ngpr++] = (GPRArg)dp;

#define CCALL_HANDLE_COMPLEXRET2 \
  UNUSED(dp);  /* Nothing to do. */

#define CCALL_HANDLE_STRUCTARG \
  /* Pass all structs by value in registers and/or on the stack. */

#define CCALL_HANDLE_COMPLEXARG \
  /* Pass complex by value in 2 or 4 GPRs. */

#define CCALL_HANDLE_REGARG_FP1
#define CCALL_HANDLE_REGARG_FP2

#else

#define CCALL_HANDLE_STRUCTRET \
  cc->retref = !ccall_classify_struct(cts, ctr, ct); \
  if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;

#define CCALL_HANDLE_STRUCTRET2 \
  if (ccall_classify_struct(cts, ctr, ct) > 1) sp = (uint8_t *)&cc->fpr[0]; \
  memcpy(dp, sp, ctr->size);

#define CCALL_HANDLE_COMPLEXRET \
  if (!(ct->info & CTF_VARARG)) cc->retref = 0;  /* Return complex in FPRs. */

#define CCALL_HANDLE_COMPLEXRET2 \
  if (!(ct->info & CTF_VARARG)) memcpy(dp, &cc->fpr[0], ctr->size);

#define CCALL_HANDLE_STRUCTARG \
  isfp = (ccall_classify_struct(cts, d, ct) > 1);
  /* Pass all structs by value in registers and/or on the stack. */

#define CCALL_HANDLE_COMPLEXARG \
  isfp = 1;  /* Pass complex by value in FPRs or on stack. */

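/*
** The fprodd counter below implements the back-filling rule of the VFP
** hard-float AAPCS: when a single float leaves the upper half of a
** double-width FPR slot unused, fprodd remembers that slot (as index+1) so
** a later single float can be placed into cc->fpr[fprodd-1].f[1] instead of
** consuming a fresh register. Once an FP argument has gone to the stack,
** fprodd is reset and no further back-filling is attempted.
*/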
#define CCALL_HANDLE_REGARG_FP1 \
  if (isfp && !(ct->info & CTF_VARARG)) { \
    if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
      if (nfpr + (n >> 1) <= CCALL_NARG_FPR) { \
        dp = &cc->fpr[nfpr]; \
        nfpr += (n >> 1); \
        goto done; \
      } \
    } else { \
      if (sz > 1 && fprodd != nfpr) fprodd = 0; \
      if (fprodd) { \
        if (2*nfpr+n <= 2*CCALL_NARG_FPR+1) { \
          dp = (void *)&cc->fpr[fprodd-1].f[1]; \
          nfpr += (n >> 1); \
          if ((n & 1)) fprodd = 0; else fprodd = nfpr-1; \
          goto done; \
        } \
      } else { \
        if (2*nfpr+n <= 2*CCALL_NARG_FPR) { \
          dp = (void *)&cc->fpr[nfpr]; \
          nfpr += (n >> 1); \
          if ((n & 1)) fprodd = ++nfpr; else fprodd = 0; \
          goto done; \
        } \
      } \
    } \
    fprodd = 0;  /* No reordering after the first FP value is on stack. */ \
  } else {

#define CCALL_HANDLE_REGARG_FP2 }

#endif

#define CCALL_HANDLE_REGARG \
  CCALL_HANDLE_REGARG_FP1 \
  if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
    if (ngpr < maxgpr) \
      ngpr = (ngpr + 1u) & ~1u;  /* Align to regpair. */ \
  } \
  if (ngpr < maxgpr) { \
    dp = &cc->gpr[ngpr]; \
    if (ngpr + n > maxgpr) { \
      nsp += ngpr + n - maxgpr;  /* Assumes contiguous gpr/stack fields. */ \
      if (nsp > CCALL_MAXSTACK) goto err_nyi;  /* Too many arguments. */ \
      ngpr = maxgpr; \
    } else { \
      ngpr += n; \
    } \
    goto done; \
  } CCALL_HANDLE_REGARG_FP2

#define CCALL_HANDLE_RET \
  if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0];
#elif LJ_TARGET_ARM64
/* -- ARM64 calling conventions ------------------------------------------- */

#define CCALL_HANDLE_STRUCTRET \
  cc->retref = !ccall_classify_struct(cts, ctr); \
  if (cc->retref) cc->retp = dp;

#define CCALL_HANDLE_STRUCTRET2 \
  unsigned int cl = ccall_classify_struct(cts, ctr); \
  if ((cl & 4)) {  /* Combine float HFA from separate registers. */ \
    CTSize i = (cl >> 8) - 1; \
    do { ((uint32_t *)dp)[i] = cc->fpr[i].u32; } while (i--); \
  } else { \
    if (cl > 1) sp = (uint8_t *)&cc->fpr[0]; \
    memcpy(dp, sp, ctr->size); \
  }

#define CCALL_HANDLE_COMPLEXRET \
  /* Complex values are returned in one or two FPRs. */ \
  cc->retref = 0;

#define CCALL_HANDLE_COMPLEXRET2 \
  if (ctr->size == 2*sizeof(float)) {  /* Copy complex float from FPRs. */ \
    ((float *)dp)[0] = cc->fpr[0].f; \
    ((float *)dp)[1] = cc->fpr[1].f; \
  } else {  /* Copy complex double from FPRs. */ \
    ((double *)dp)[0] = cc->fpr[0].d; \
    ((double *)dp)[1] = cc->fpr[1].d; \
  }

#define CCALL_HANDLE_STRUCTARG \
  unsigned int cl = ccall_classify_struct(cts, d); \
  if (cl == 0) {  /* Pass struct by reference. */ \
    rp = cdataptr(lj_cdata_new(cts, did, sz)); \
    sz = CTSIZE_PTR; \
  } else if (cl > 1) {  /* Pass struct in FPRs or on stack. */ \
    isfp = (cl & 4) ? 2 : 1; \
  }  /* else: Pass struct in GPRs or on stack. */

#define CCALL_HANDLE_COMPLEXARG \
  /* Pass complex by value in separate (!) FPRs or on stack. */ \
  isfp = ctr->size == 2*sizeof(float) ? 2 : 1;

#define CCALL_HANDLE_REGARG \
  if (LJ_TARGET_IOS && isva) { \
    /* IOS: All variadic arguments are on the stack. */ \
  } else if (isfp) {  /* Try to pass argument in FPRs. */ \
    int n2 = ctype_isvector(d->info) ? 1 : n*isfp; \
    if (nfpr + n2 <= CCALL_NARG_FPR) { \
      dp = &cc->fpr[nfpr]; \
      nfpr += n2; \
      goto done; \
    } else { \
      nfpr = CCALL_NARG_FPR;  /* Prevent reordering. */ \
      if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \
    } \
  } else {  /* Try to pass argument in GPRs. */ \
    if (!LJ_TARGET_IOS && (d->info & CTF_ALIGN) > CTALIGN_PTR) \
      ngpr = (ngpr + 1u) & ~1u;  /* Align to regpair. */ \
    if (ngpr + n <= maxgpr) { \
      dp = &cc->gpr[ngpr]; \
      ngpr += n; \
      goto done; \
    } else { \
      ngpr = maxgpr;  /* Prevent reordering. */ \
      if (LJ_TARGET_IOS && d->size < 8) goto err_nyi; \
    } \
  }
#elif LJ_TARGET_PPC
/* -- PPC calling conventions --------------------------------------------- */

#define CCALL_HANDLE_STRUCTRET \
  cc->retref = 1;  /* Return all structs by reference. */ \
  cc->gpr[ngpr++] = (GPRArg)dp;

#define CCALL_HANDLE_COMPLEXRET \
  /* Complex values are returned in 2 or 4 GPRs. */ \
  cc->retref = 0;

#define CCALL_HANDLE_COMPLEXRET2 \
  memcpy(dp, sp, ctr->size);  /* Copy complex from GPRs. */

#define CCALL_HANDLE_STRUCTARG \
  rp = cdataptr(lj_cdata_new(cts, did, sz)); \
  sz = CTSIZE_PTR;  /* Pass all structs by reference. */

#define CCALL_HANDLE_COMPLEXARG \
  /* Pass complex by value in 2 or 4 GPRs. */

#define CCALL_HANDLE_REGARG \
  if (isfp) {  /* Try to pass argument in FPRs. */ \
    if (nfpr + 1 <= CCALL_NARG_FPR) { \
      dp = &cc->fpr[nfpr]; \
      nfpr += 1; \
      d = ctype_get(cts, CTID_DOUBLE);  /* FPRs always hold doubles. */ \
      goto done; \
    } \
  } else {  /* Try to pass argument in GPRs. */ \
    if (n > 1) { \
      lua_assert(n == 2 || n == 4);  /* int64_t or complex (float). */ \
      if (ctype_isinteger(d->info)) \
        ngpr = (ngpr + 1u) & ~1u;  /* Align int64_t to regpair. */ \
      else if (ngpr + n > maxgpr) \
        ngpr = maxgpr;  /* Prevent reordering. */ \
    } \
    if (ngpr + n <= maxgpr) { \
      dp = &cc->gpr[ngpr]; \
      ngpr += n; \
      goto done; \
    } \
  }

#define CCALL_HANDLE_RET \
  if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
    ctr = ctype_get(cts, CTID_DOUBLE);  /* FPRs always hold doubles. */
#elif LJ_TARGET_MIPS
/* -- MIPS calling conventions -------------------------------------------- */

#define CCALL_HANDLE_STRUCTRET \
  cc->retref = 1;  /* Return all structs by reference. */ \
  cc->gpr[ngpr++] = (GPRArg)dp;

#define CCALL_HANDLE_COMPLEXRET \
  /* Complex values are returned in 1 or 2 FPRs. */ \
  cc->retref = 0;

#define CCALL_HANDLE_COMPLEXRET2 \
  if (ctr->size == 2*sizeof(float)) {  /* Copy complex float from FPRs. */ \
    ((float *)dp)[0] = cc->fpr[0].f; \
    ((float *)dp)[1] = cc->fpr[1].f; \
  } else {  /* Copy complex double from FPRs. */ \
    ((double *)dp)[0] = cc->fpr[0].d; \
    ((double *)dp)[1] = cc->fpr[1].d; \
  }

#define CCALL_HANDLE_STRUCTARG \
  /* Pass all structs by value in registers and/or on the stack. */

#define CCALL_HANDLE_COMPLEXARG \
  /* Pass complex by value in 2 or 4 GPRs. */

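/*
** Note on the MIPS convention modelled below (o32 hard-float): the first
** few FP arguments travel in FPRs, but each one still reserves its GPR
** slots (hence the combined nfpr++ and ngpr += n), and once any argument
** has taken the GPR path or the callee is variadic, FPRs are not used at
** all for the remaining arguments.
*/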
#define CCALL_HANDLE_REGARG \
  if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \
    /* Try to pass argument in FPRs. */ \
    dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \
    nfpr++; ngpr += n; \
    goto done; \
  } else {  /* Try to pass argument in GPRs. */ \
    nfpr = CCALL_NARG_FPR; \
    if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \
      ngpr = (ngpr + 1u) & ~1u;  /* Align to regpair. */ \
    if (ngpr < maxgpr) { \
      dp = &cc->gpr[ngpr]; \
      if (ngpr + n > maxgpr) { \
        nsp += ngpr + n - maxgpr;  /* Assumes contiguous gpr/stack fields. */ \
        if (nsp > CCALL_MAXSTACK) goto err_nyi;  /* Too many arguments. */ \
        ngpr = maxgpr; \
      } else { \
        ngpr += n; \
      } \
      goto done; \
    } \
  }

#define CCALL_HANDLE_RET \
  if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
    sp = (uint8_t *)&cc->fpr[0].f;

#else
#error "Missing calling convention definitions for this architecture"
#endif
#ifndef CCALL_HANDLE_STRUCTRET2
#define CCALL_HANDLE_STRUCTRET2 \
  memcpy(dp, sp, ctr->size);  /* Copy struct return value from GPRs. */
#endif

/* -- x86 OSX ABI struct classification ----------------------------------- */

#if LJ_TARGET_X86 && LJ_TARGET_OSX

/* Check for struct with single FP field. */
static int ccall_classify_struct(CTState *cts, CType *ct)
{
  CTSize sz = ct->size;
  if (!(sz == sizeof(float) || sz == sizeof(double))) return 0;
  if ((ct->info & CTF_UNION)) return 0;
  while (ct->sib) {
    ct = ctype_get(cts, ct->sib);
    if (ctype_isfield(ct->info)) {
      CType *sct = ctype_rawchild(cts, ct);
      if (ctype_isfp(sct->info)) {
        if (sct->size == sz)
          return (sz >> 2);  /* Return 1 for float or 2 for double. */
      } else if (ctype_isstruct(sct->info)) {
        if (sct->size)
          return ccall_classify_struct(cts, sct);
      } else {
        break;
      }
    } else if (ctype_isbitfield(ct->info)) {
      break;
    } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
      CType *sct = ctype_rawchild(cts, ct);
      if (sct->size)
        return ccall_classify_struct(cts, sct);
    }
  }
  return 0;
}

#endif
/* -- x64 struct classification ------------------------------------------- */

#if LJ_TARGET_X64 && !LJ_ABI_WIN

/* Register classes for x64 struct classification. */
#define CCALL_RCL_INT 1
#define CCALL_RCL_SSE 2
#define CCALL_RCL_MEM 4
/* NYI: classify vectors. */

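/*
** The classifiers below assign one class per "eightbyte" (rcl[0] for bytes
** 0-7, rcl[1] for bytes 8-15), roughly following the SysV x64 rules: any
** integer member makes an eightbyte INT, FP-only eightbytes stay SSE, and
** anything unaligned or larger than 16 bytes degrades to MEM (passed in
** memory). For example, struct { double x; int y; } would classify as
** rcl[0] = SSE and rcl[1] = INT, so it travels in one XMM and one GPR.
*/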
static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs);

/* Classify a C type. */
static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
{
  if (ctype_isarray(ct->info)) {
    CType *cct = ctype_rawchild(cts, ct);
    CTSize eofs, esz = cct->size, asz = ct->size;
    for (eofs = 0; eofs < asz; eofs += esz)
      ccall_classify_ct(cts, cct, rcl, ofs+eofs);
  } else if (ctype_isstruct(ct->info)) {
    ccall_classify_struct(cts, ct, rcl, ofs);
  } else {
    int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT;
    lua_assert(ctype_hassize(ct->info));
    if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM;  /* Unaligned. */
    rcl[(ofs >= 8)] |= cl;
  }
}

/* Recursively classify a struct based on its fields. */
static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
{
  if (ct->size > 16) return CCALL_RCL_MEM;  /* Too big, gets memory class. */
  while (ct->sib) {
    CTSize fofs;
    ct = ctype_get(cts, ct->sib);
    fofs = ofs+ct->size;
    if (ctype_isfield(ct->info))
      ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs);
    else if (ctype_isbitfield(ct->info))
      rcl[(fofs >= 8)] |= CCALL_RCL_INT;  /* NYI: unaligned bitfields? */
    else if (ctype_isxattrib(ct->info, CTA_SUBTYPE))
      ccall_classify_struct(cts, ctype_rawchild(cts, ct), rcl, fofs);
  }
  return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM);  /* Memory class? */
}

/* Try to split up a small struct into registers. */
static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl)
{
  MSize ngpr = cc->ngpr, nfpr = cc->nfpr;
  uint32_t i;
  for (i = 0; i < 2; i++) {
    lua_assert(!(rcl[i] & CCALL_RCL_MEM));
    if ((rcl[i] & CCALL_RCL_INT)) {  /* Integer class takes precedence. */
      if (ngpr >= CCALL_NARG_GPR) return 1;  /* Register overflow. */
      cc->gpr[ngpr++] = dp[i];
    } else if ((rcl[i] & CCALL_RCL_SSE)) {
      if (nfpr >= CCALL_NARG_FPR) return 1;  /* Register overflow. */
      cc->fpr[nfpr++].l[0] = dp[i];
    }
  }
  cc->ngpr = ngpr; cc->nfpr = nfpr;
  return 0;  /* Ok. */
}

/* Pass a small struct argument. */
static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl,
                            TValue *o, int narg)
{
  GPRArg dp[2];
  dp[0] = dp[1] = 0;
  /* Convert to temp. struct. */
  lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
  if (ccall_struct_reg(cc, dp, rcl)) {  /* Register overflow? Pass on stack. */
    MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1;
    if (nsp + n > CCALL_MAXSTACK) return 1;  /* Too many arguments. */
    cc->nsp = nsp + n;
    memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR);
  }
  return 0;  /* Ok. */
}

/* Combine returned small struct. */
static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz)
{
  GPRArg sp[2];
  MSize ngpr = 0, nfpr = 0;
  uint32_t i;
  for (i = 0; i < 2; i++) {
    if ((rcl[i] & CCALL_RCL_INT)) {  /* Integer class takes precedence. */
      sp[i] = cc->gpr[ngpr++];
    } else if ((rcl[i] & CCALL_RCL_SSE)) {
      sp[i] = cc->fpr[nfpr++].l[0];
    }
  }
  memcpy(dp, sp, sz);
}

#endif
/* -- ARM hard-float ABI struct classification ---------------------------- */

#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP

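/*
** Return value encoding used by this classifier: 0 = not eligible for
** register return, 1 = small enough for a GPR, otherwise r + (n << 8) for a
** homogeneous FP aggregate, where r is 4 for float or 8 for double elements
** and n is the element count (at most 4). E.g. struct { float x, y; }
** classifies as 4 + (2 << 8), a two-element single-precision HFA.
*/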
/* Classify a struct based on its fields. */
static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf)
{
  CTSize sz = ct->size;
  unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
  if ((ctf->info & CTF_VARARG)) goto noth;
  while (ct->sib) {
    CType *sct;
    ct = ctype_get(cts, ct->sib);
    if (ctype_isfield(ct->info)) {
      sct = ctype_rawchild(cts, ct);
      if (ctype_isfp(sct->info)) {
        r |= sct->size;
        if (!isu) n++; else if (n == 0) n = 1;
      } else if (ctype_iscomplex(sct->info)) {
        r |= (sct->size >> 1);
        if (!isu) n += 2; else if (n < 2) n = 2;
      } else if (ctype_isstruct(sct->info)) {
        goto substruct;
      } else {
        goto noth;
      }
    } else if (ctype_isbitfield(ct->info)) {
      goto noth;
    } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
      sct = ctype_rawchild(cts, ct);
    substruct:
      if (sct->size > 0) {
        unsigned int s = ccall_classify_struct(cts, sct, ctf);
        if (s <= 1) goto noth;
        r |= (s & 255);
        if (!isu) n += (s >> 8); else if (n < (s >> 8)) n = (s >> 8);
      }
    }
  }
  if ((r == 4 || r == 8) && n <= 4)
    return r + (n << 8);
noth:  /* Not a homogeneous float/double aggregate. */
  return (sz <= 4);  /* Return structs of size <= 4 in a GPR. */
}

#endif
/* -- ARM64 ABI struct classification ------------------------------------- */

#if LJ_TARGET_ARM64

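/*
** Same encoding as the ARM classifier above, but 0 here means the struct is
** passed/returned by reference and 1 means it fits in GPRs (size <= 16).
** The cl & 4 bit (single-precision HFA) matters twice later on: argument
** setup widens each float into its own 64-bit FPR slot ("Split float HFA"),
** and CCALL_HANDLE_STRUCTRET2 gathers the elements back from separate FPRs.
*/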
/* Classify a struct based on its fields. */
static unsigned int ccall_classify_struct(CTState *cts, CType *ct)
{
  CTSize sz = ct->size;
  unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
  while (ct->sib) {
    CType *sct;
    ct = ctype_get(cts, ct->sib);
    if (ctype_isfield(ct->info)) {
      sct = ctype_rawchild(cts, ct);
      if (ctype_isfp(sct->info)) {
        r |= sct->size;
        if (!isu) n++; else if (n == 0) n = 1;
      } else if (ctype_iscomplex(sct->info)) {
        r |= (sct->size >> 1);
        if (!isu) n += 2; else if (n < 2) n = 2;
      } else if (ctype_isstruct(sct->info)) {
        goto substruct;
      } else {
        goto noth;
      }
    } else if (ctype_isbitfield(ct->info)) {
      goto noth;
    } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
      sct = ctype_rawchild(cts, ct);
    substruct:
      if (sct->size > 0) {
        unsigned int s = ccall_classify_struct(cts, sct);
        if (s <= 1) goto noth;
        r |= (s & 255);
        if (!isu) n += (s >> 8); else if (n < (s >> 8)) n = (s >> 8);
      }
    }
  }
  if ((r == 4 || r == 8) && n <= 4)
    return r + (n << 8);
noth:  /* Not a homogeneous float/double aggregate. */
  return (sz <= 16);  /* Return structs of size <= 16 in GPRs. */
}

#endif
/* -- Common C call handling ---------------------------------------------- */

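/*
** The inference below roughly follows C's default argument promotions as
** seen from Lua: plain Lua numbers and float cdata become double, strings
** become const char *, booleans stay bool, arrays, structs and functions
** are passed as pointers, and anything else falls back to void *.
*/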
/* Infer the destination CTypeID for a vararg argument. */
CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o)
{
  if (tvisnumber(o)) {
    return CTID_DOUBLE;
  } else if (tviscdata(o)) {
    CTypeID id = cdataV(o)->ctypeid;
    CType *s = ctype_get(cts, id);
    if (ctype_isrefarray(s->info)) {
      return lj_ctype_intern(cts,
               CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR);
    } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) {
      /* NYI: how to pass a struct by value in a vararg argument? */
      return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR);
    } else if (ctype_isfp(s->info) && s->size == sizeof(float)) {
      return CTID_DOUBLE;
    } else {
      return id;
    }
  } else if (tvisstr(o)) {
    return CTID_P_CCHAR;
  } else if (tvisbool(o)) {
    return CTID_BOOL;
  } else {
    return CTID_P_VOID;
  }
}

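/*
** Overview of the argument setup below: the result type is examined first
** (struct/complex returns may preallocate a cdata object and claim a hidden
** pointer argument), then each Lua argument is matched against the declared
** or inferred C type, converted into either a GPR/FPR slot or a stack slot
** of the CCallState, extended or split as the target ABI requires, and
** finally the stack adjustment for lj_vm_ffi_call() is computed.
*/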
/* Setup arguments for C call. */
static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
                          CCallState *cc)
{
  int gcsteps = 0;
  TValue *o, *top = L->top;
  CTypeID fid;
  CType *ctr;
  MSize maxgpr, ngpr = 0, nsp = 0, narg;
#if CCALL_NARG_FPR
  MSize nfpr = 0;
#if LJ_TARGET_ARM
  MSize fprodd = 0;
#endif
#endif

  /* Clear unused regs to get some determinism in case of misdeclaration. */
  memset(cc->gpr, 0, sizeof(cc->gpr));
#if CCALL_NUM_FPR
  memset(cc->fpr, 0, sizeof(cc->fpr));
#endif

#if LJ_TARGET_X86
  /* x86 has several different calling conventions. */
  cc->resx87 = 0;
  switch (ctype_cconv(ct->info)) {
  case CTCC_FASTCALL: maxgpr = 2; break;
  case CTCC_THISCALL: maxgpr = 1; break;
  default: maxgpr = 0; break;
  }
#else
  maxgpr = CCALL_NARG_GPR;
#endif

  /* Perform required setup for some result types. */
  ctr = ctype_rawchild(cts, ct);
  if (ctype_isvector(ctr->info)) {
    if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16)))
      goto err_nyi;
  } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) {
    /* Preallocate cdata object and anchor it after arguments. */
    CTSize sz = ctr->size;
    GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz);
    void *dp = cdataptr(cd);
    setcdataV(L, L->top++, cd);
    if (ctype_isstruct(ctr->info)) {
      CCALL_HANDLE_STRUCTRET
    } else {
      CCALL_HANDLE_COMPLEXRET
    }
#if LJ_TARGET_X86
  } else if (ctype_isfp(ctr->info)) {
    cc->resx87 = ctr->size == sizeof(float) ? 1 : 2;
#endif
  }

  /* Skip initial attributes. */
  fid = ct->sib;
  while (fid) {
    CType *ctf = ctype_get(cts, fid);
    if (!ctype_isattrib(ctf->info)) break;
    fid = ctf->sib;
  }

  /* Walk through all passed arguments. */
  for (o = L->base+1, narg = 1; o < top; o++, narg++) {
    CTypeID did;
    CType *d;
    CTSize sz;
    MSize n, isfp = 0, isva = 0;
    void *dp, *rp = NULL;

    if (fid) {  /* Get argument type from field. */
      CType *ctf = ctype_get(cts, fid);
      fid = ctf->sib;
      lua_assert(ctype_isfield(ctf->info));
      did = ctype_cid(ctf->info);
    } else {
      if (!(ct->info & CTF_VARARG))
        lj_err_caller(L, LJ_ERR_FFI_NUMARG);  /* Too many arguments. */
      did = lj_ccall_ctid_vararg(cts, o);  /* Infer vararg type. */
      isva = 1;
    }
    d = ctype_raw(cts, did);
    sz = d->size;

    /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */
    if (ctype_isnum(d->info)) {
      if (sz > 8) goto err_nyi;
      if ((d->info & CTF_FP))
        isfp = 1;
    } else if (ctype_isvector(d->info)) {
      if (CCALL_VECTOR_REG && (sz == 8 || sz == 16))
        isfp = 1;
      else
        goto err_nyi;
    } else if (ctype_isstruct(d->info)) {
      CCALL_HANDLE_STRUCTARG
    } else if (ctype_iscomplex(d->info)) {
      CCALL_HANDLE_COMPLEXARG
    } else {
      sz = CTSIZE_PTR;
    }
    sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
    n = sz / CTSIZE_PTR;  /* Number of GPRs or stack slots needed. */

    CCALL_HANDLE_REGARG  /* Handle register arguments. */

    /* Otherwise pass argument on stack. */
    if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) {
      MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) - 1;
      nsp = (nsp + align) & ~align;  /* Align argument on stack. */
    }
    if (nsp + n > CCALL_MAXSTACK) {  /* Too many arguments. */
    err_nyi:
      lj_err_caller(L, LJ_ERR_FFI_NYICALL);
    }
    dp = &cc->stack[nsp];
    nsp += n;
    isva = 0;

  done:
    if (rp) {  /* Pass by reference. */
      gcsteps++;
      *(void **)dp = rp;
      dp = rp;
    }
    lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
    /* Extend passed integers to 32 bits at least. */
    if (ctype_isinteger_or_bool(d->info) && d->size < 4) {
      if (d->info & CTF_UNSIGNED)
        *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp :
                                         (uint32_t)*(uint16_t *)dp;
      else
        *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp :
                                        (int32_t)*(int16_t *)dp;
    }
#if LJ_TARGET_X64 && LJ_ABI_WIN
    if (isva) {  /* Windows/x64 mirrors varargs in both register sets. */
      if (nfpr == ngpr)
        cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0];
      else
        cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1];
    }
#else
    UNUSED(isva);
#endif
#if LJ_TARGET_X64 && !LJ_ABI_WIN
    if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) {
      cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1];  /* Split complex double. */
      cc->fpr[nfpr-2].d[1] = 0;
    }
#elif LJ_TARGET_ARM64
    if (isfp == 2 && (uint8_t *)dp < (uint8_t *)cc->stack) {
      /* Split float HFA or complex float into separate registers. */
      CTSize i = (sz >> 2) - 1;
      do { ((uint64_t *)dp)[i] = ((uint32_t *)dp)[i]; } while (i--);
    }
#else
    UNUSED(isfp);
#endif
  }
  if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG);  /* Too few arguments. */

#if LJ_TARGET_X64 || LJ_TARGET_PPC
  cc->nfpr = nfpr;  /* Required for vararg functions. */
#endif
  cc->nsp = nsp;
  cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR;
  if (nsp > CCALL_SPS_FREE)
    cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u);
  return gcsteps;
}
/* Get results from C call. */
static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
                             CCallState *cc, int *ret)
{
  CType *ctr = ctype_rawchild(cts, ct);
  uint8_t *sp = (uint8_t *)&cc->gpr[0];
  if (ctype_isvoid(ctr->info)) {
    *ret = 0;  /* Zero results. */
    return 0;  /* No additional GC step. */
  }
  *ret = 1;  /* One result. */
  if (ctype_isstruct(ctr->info)) {
    /* Return cdata object which is already on top of stack. */
    if (!cc->retref) {
      void *dp = cdataptr(cdataV(L->top-1));  /* Use preallocated object. */
      CCALL_HANDLE_STRUCTRET2
    }
    return 1;  /* One GC step. */
  }
  if (ctype_iscomplex(ctr->info)) {
    /* Return cdata object which is already on top of stack. */
    void *dp = cdataptr(cdataV(L->top-1));  /* Use preallocated object. */
    CCALL_HANDLE_COMPLEXRET2
    return 1;  /* One GC step. */
  }
  if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR)
    sp += (CTSIZE_PTR - ctr->size);
#if CCALL_NUM_FPR
  if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info))
    sp = (uint8_t *)&cc->fpr[0];
#endif
#ifdef CCALL_HANDLE_RET
  CCALL_HANDLE_RET
#endif
  /* No reference types end up here, so there's no need for the CTypeID. */
  lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)));
  return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp);
}

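/*
** Called when a function cdata (or a pointer to one) is invoked from Lua.
** The sequence below is: marshal the arguments with ccall_set_args(), invoke
** the machine-level trampoline lj_vm_ffi_call(), blacklist the function for
** trace compilation if it re-entered Lua through a callback, unmarshal the
** result, and run one GC step per cdata object allocated along the way.
*/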
/* Call C function. */
int lj_ccall_func(lua_State *L, GCcdata *cd)
{
  CTState *cts = ctype_cts(L);
  CType *ct = ctype_raw(cts, cd->ctypeid);
  CTSize sz = CTSIZE_PTR;
  if (ctype_isptr(ct->info)) {
    sz = ct->size;
    ct = ctype_rawchild(cts, ct);
  }
  if (ctype_isfunc(ct->info)) {
    CCallState cc;
    int gcsteps, ret;
    cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz);
    gcsteps = ccall_set_args(L, cts, ct, &cc);
    ct = (CType *)((intptr_t)ct-(intptr_t)cts->tab);
    cts->cb.slot = ~0u;
    lj_vm_ffi_call(&cc);
    if (cts->cb.slot != ~0u) {  /* Blacklist function that called a callback. */
      TValue tv;
      setlightudV(&tv, (void *)cc.func);
      setboolV(lj_tab_set(L, cts->miscmap, &tv), 1);
    }
    ct = (CType *)((intptr_t)ct+(intptr_t)cts->tab);  /* May be reallocated. */
    gcsteps += ccall_get_results(L, cts, ct, &cc, &ret);
#if LJ_TARGET_X86 && LJ_ABI_WIN
    /* Automatically detect __stdcall and fix up C function declaration. */
    if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) {
      CTF_INSERT(ct->info, CCONV, CTCC_STDCALL);
      lj_trace_abort(G(L));
    }
#endif
    while (gcsteps-- > 0)
      lj_gc_check(L);
    return ret;
  }
  return -1;  /* Not a function. */
}

#endif