/* Copyright (c) 2020 Kalray

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#if defined(__kvx__)
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fficonfig.h>
#include <ffi.h>
#include "ffi_common.h"
#include "asm.h"

#define ALIGN(x, a) ALIGN_MASK(x, (typeof(x))(a) - 1)
#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define KVX_ABI_STACK_ALIGNMENT (32)
#define KVX_ABI_STACK_ARG_ALIGNMENT (8)
#define max(a,b) ((a) > (b) ? (a) : (b))
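
/* For instance, ALIGN(12, 8) == 16, ALIGN(16, 8) == 16 and
   ALIGN(40, KVX_ABI_STACK_ALIGNMENT) == 64: sizes are rounded up to the
   next multiple of a power-of-two alignment. */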

#ifdef FFI_DEBUG
#define DEBUG_PRINT(...) do{ fprintf( stderr, __VA_ARGS__ ); } while(0)
#else
#define DEBUG_PRINT(...)
#endif

struct ret_value {
    unsigned long int r0;
    unsigned long int r1;
    unsigned long int r2;
    unsigned long int r3;
};

extern struct ret_value ffi_call_SYSV(unsigned total_size,
                                      unsigned slot_fitting_args_size,
                                      extended_cif *ecif,
                                      unsigned *rvalue_addr,
                                      void *fn,
                                      unsigned int_ext_method);

/* Perform machine dependent cif processing */
ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
{
    cif->flags = cif->rtype->size;
    return FFI_OK;
}

/* ffi_prep_args is called by the assembly routine once stack space
   has been allocated for the function's arguments */
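
/* Layout sketch (illustrative): the buffer handed to ffi_prep_args starts
   with one KVX_ABI_SLOT_SIZE slot per register-passed argument, followed at
   offset arg_slots_size by the by-value copies of aggregates that are too
   large to fit in registers.  For example, a call f(int x, struct big s)
   where sizeof(struct big) exceeds KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE uses
   one slot for x, one slot holding a pointer to the copy of s, and
   sizeof(struct big) bytes (slot-aligned) in the trailing area for the
   copy itself. */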

void *ffi_prep_args(char *stack, unsigned int arg_slots_size, extended_cif *ecif)
{
    int i, s;
    char *stacktemp = stack;
    char *current_arg_passed_by_value = stack + arg_slots_size;
    ffi_cif *cif = ecif->cif;
    void **argv = ecif->avalue;
    ffi_type **arg = cif->arg_types;

    DEBUG_PRINT("stack: %p\n", stack);
    DEBUG_PRINT("arg_slots_size: %u\n", arg_slots_size);
    DEBUG_PRINT("current_arg_passed_by_value: %p\n", current_arg_passed_by_value);
    DEBUG_PRINT("ecif: %p\n", ecif);
    DEBUG_PRINT("ecif->avalue: %p\n", ecif->avalue);

    for (i = 0; i < cif->nargs; i++) {

        s = KVX_ABI_SLOT_SIZE;
        switch ((*arg)->type) {
        case FFI_TYPE_SINT8:
        case FFI_TYPE_UINT8:
        case FFI_TYPE_SINT16:
        case FFI_TYPE_UINT16:
        case FFI_TYPE_SINT32:
        case FFI_TYPE_UINT32:
        case FFI_TYPE_SINT64:
        case FFI_TYPE_UINT64:
        case FFI_TYPE_FLOAT:
        case FFI_TYPE_DOUBLE:
        case FFI_TYPE_POINTER:
            DEBUG_PRINT("INT64/32/16/8/FLOAT/DOUBLE or POINTER @%p\n", stack);
            *(uint64_t *) stack = *(uint64_t *)(*argv);
            break;

        case FFI_TYPE_COMPLEX:
            if ((*arg)->size == 8)
                *(_Complex float *) stack = *(_Complex float *)(*argv);
            else if ((*arg)->size == 16) {
                *(_Complex double *) stack = *(_Complex double *)(*argv);
                s = 16;
            } else
                abort();
            break;

        case FFI_TYPE_STRUCT: {
            char *value;
            unsigned int written_size = 0;
            DEBUG_PRINT("struct by value @%p\n", stack);
            if ((*arg)->size > KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE) {
                DEBUG_PRINT("big struct\n");
                *(uint64_t *) stack = (uintptr_t)current_arg_passed_by_value;
                value = current_arg_passed_by_value;
                current_arg_passed_by_value += (*arg)->size;
                written_size = KVX_ABI_SLOT_SIZE;
            } else {
                value = stack;
                written_size = (*arg)->size;
            }
            memcpy(value, *argv, (*arg)->size);
            s = ALIGN(written_size, KVX_ABI_STACK_ARG_ALIGNMENT);
            break;
        }

        default:
            printf("Error: unsupported arg type %d\n", (*arg)->type);
            abort();
        }

        stack += s;
        argv++;
        arg++;
    }

    FFI_ASSERT(((intptr_t)(stacktemp + REG_ARGS_SIZE) & (KVX_ABI_STACK_ALIGNMENT - 1)) == 0);

    return stacktemp + REG_ARGS_SIZE;
}

/* Perform machine dependent cif processing when we have a variadic function */

ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs,
                                    unsigned int ntotalargs)
{
    cif->flags = cif->rtype->size;
    return FFI_OK;
}

static unsigned long handle_small_int_ext(kvx_intext_method *int_ext_method,
                                          const ffi_type *rtype)
{
    switch (rtype->type) {
    case FFI_TYPE_SINT8:
        *int_ext_method = KVX_RET_SXBD;
        return KVX_REGISTER_SIZE;

    case FFI_TYPE_SINT16:
        *int_ext_method = KVX_RET_SXHD;
        return KVX_REGISTER_SIZE;

    case FFI_TYPE_SINT32:
        *int_ext_method = KVX_RET_SXWD;
        return KVX_REGISTER_SIZE;

    case FFI_TYPE_UINT8:
        *int_ext_method = KVX_RET_ZXBD;
        return KVX_REGISTER_SIZE;

    case FFI_TYPE_UINT16:
        *int_ext_method = KVX_RET_ZXHD;
        return KVX_REGISTER_SIZE;

    case FFI_TYPE_UINT32:
        *int_ext_method = KVX_RET_ZXWD;
        return KVX_REGISTER_SIZE;

    default:
        *int_ext_method = KVX_RET_NONE;
        return rtype->size;
    }
}

void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
    int i;
    ffi_type **arg;
    unsigned long wb_size;
    extended_cif ecif;
    unsigned long int slot_fitting_args_size = 0;
    unsigned long int total_size = 0;
    unsigned long int big_struct_size = 0;
    kvx_intext_method int_extension_method;
    struct ret_value local_rvalue = {0};

    /* Calculate size to allocate on stack */
    for (i = 0, arg = cif->arg_types; i < cif->nargs; i++, arg++) {
        DEBUG_PRINT("argument %d, type %d, size %lu\n", i, (*arg)->type, (*arg)->size);
        if (((*arg)->type == FFI_TYPE_STRUCT) || ((*arg)->type == FFI_TYPE_COMPLEX)) {
            if ((*arg)->size <= KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE) {
                slot_fitting_args_size += ALIGN((*arg)->size, KVX_ABI_SLOT_SIZE);
            } else {
                slot_fitting_args_size += KVX_ABI_SLOT_SIZE; /* aggregate passed by reference */
                big_struct_size += ALIGN((*arg)->size, KVX_ABI_SLOT_SIZE);
            }
        } else if ((*arg)->size <= KVX_ABI_SLOT_SIZE) {
            slot_fitting_args_size += KVX_ABI_SLOT_SIZE;
        } else {
            printf("Error: unsupported arg size %ld arg type %d\n", (*arg)->size, (*arg)->type);
            abort(); /* should never happen? */
        }
    }

    ecif.cif = cif;
    ecif.avalue = avalue;
    ecif.rvalue = rvalue;

    /* This implementation allocates anyway for all register based args */
    slot_fitting_args_size = max(slot_fitting_args_size, REG_ARGS_SIZE);
    total_size = slot_fitting_args_size + big_struct_size;
    total_size = ALIGN(total_size, KVX_ABI_STACK_ALIGNMENT);

    /* wb_size: write back size, the size we need to write back to the
     * user-provided buffer. In theory it is always cif->flags, which is
     * cif->rtype->size. But the libffi API mandates that for integral types
     * of size <= system register size we *MUST* write back the system
     * register size; in our case, if size <= 8 bytes we must write back
     * 8 bytes. Floats, complex and structs are not affected, only integrals. */
    wb_size = handle_small_int_ext(&int_extension_method, cif->rtype);
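    /* For example, if the callee returns (signed char)-1, wb_size is 8 and
     * int_extension_method is KVX_RET_SXBD, so the assembly stub hands back
     * the byte widened to a full 8-byte -1 rather than a single byte
     * (illustrative, assuming SXBD/ZXBD and friends denote sign/zero
     * extension to a 64-bit register). */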

    DEBUG_PRINT("total_size: %lu\n", total_size);
    DEBUG_PRINT("slot fitting args size: %lu\n", slot_fitting_args_size);
    DEBUG_PRINT("rvalue: %p\n", rvalue);
    DEBUG_PRINT("fn: %p\n", fn);
    DEBUG_PRINT("rsize: %u\n", cif->flags);
    DEBUG_PRINT("wb_size: %lu\n", wb_size);
    DEBUG_PRINT("int_extension_method: %u\n", int_extension_method);

    local_rvalue = ffi_call_SYSV(total_size, slot_fitting_args_size,
                                 &ecif, rvalue, fn, int_extension_method);

    if ((cif->flags <= KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE)
        && (cif->rtype->type != FFI_TYPE_VOID))
        memcpy(rvalue, &local_rvalue, wb_size);
}
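
/* Illustrative usage through the public libffi API (example only; the
   function name `add' and its signature are hypothetical):

       int add(int a, int b) { return a + b; }

       ffi_cif cif;
       ffi_type *args[2] = { &ffi_type_sint32, &ffi_type_sint32 };
       int a = 2, b = 3;
       void *values[2] = { &a, &b };
       ffi_sarg result;   // integral results are widened to register size

       if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint32, args) == FFI_OK)
           ffi_call(&cif, FFI_FN(add), &result, values);

   ffi_prep_cif ends up calling ffi_prep_cif_machdep above, and ffi_call
   drives ffi_prep_args/ffi_call_SYSV to lay out the arguments and perform
   the call. */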

/* Closures not supported yet */

ffi_status
ffi_prep_closure_loc (ffi_closure *closure,
                      ffi_cif *cif,
                      void (*fun)(ffi_cif *, void *, void **, void *),
                      void *user_data,
                      void *codeloc)
{
    return FFI_BAD_ABI;
}

#endif /* (__kvx__) */