/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ffi_cfi.h>
#include "internal.h"
#ifdef HAVE_MACHINE_ASM_H
#include <machine/asm.h>
#else
#ifdef __USER_LABEL_PREFIX__
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */
#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#else
#define CNAME(x) x
#endif
#endif
/* Big-endian targets keep sub-word data at the high end of an 8-byte
   slot, so BE(n) supplies the extra byte offset used when loading
   narrow return values.  */
#ifdef __AARCH64EB__
# define BE(X)	X
#else
# define BE(X)	0
#endif

#ifdef __ILP32__
#define PTR_REG(n)	w##n
#else
#define PTR_REG(n)	x##n
#endif

#ifdef __ILP32__
#define PTR_SIZE	4
#else
#define PTR_SIZE	8
#endif
	.text
	.align	4

/* ffi_call_SYSV
   extern void ffi_call_SYSV (void *stack, void *frame,
			      void (*fn)(void), void *rvalue,
			      int flags, void *closure);

   Therefore on entry we have:

	x0	stack
	x1	frame
	x2	fn
	x3	rvalue
	x4	flags
	x5	closure
*/
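/* A sketch of the incoming memory layout, as implied by the loads and
   stores below (both areas are allocated by the C-side ffi_call before
   it transfers here):

	x1 (frame):	32 bytes -- x29/x30 saved at +0/+8,
				    rvalue/flags saved at +16/+24
	x0 (stack):	q0-q7 at +0, x0-x7 at +16*N_V_ARG_REG, then,
			CALL_CONTEXT_SIZE bytes in, the outgoing
			stacked arguments.  */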
	cfi_startproc
CNAME(ffi_call_SYSV):
	/* Use a stack frame allocated by our caller.  */
	cfi_def_cfa(x1, 32);
	stp	x29, x30, [x1]
	mov	x29, x1
	mov	sp, x0
	cfi_def_cfa_register(x29)
	cfi_rel_offset (x29, 0)
	cfi_rel_offset (x30, 8)
	mov	x9, x2			/* save fn */
	mov	x8, x3			/* install structure return */
#ifdef FFI_GO_CLOSURES
	mov	x18, x5			/* install static chain */
#endif
	stp	x3, x4, [x29, #16]	/* save rvalue and flags */
	/* Load the vector argument passing registers, if necessary.  */
	tbz	w4, #AARCH64_FLAG_ARG_V_BIT, 1f
	ldp	q0, q1, [sp, #0]
	ldp	q2, q3, [sp, #32]
	ldp	q4, q5, [sp, #64]
	ldp	q6, q7, [sp, #96]
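	/* The flag bit lets us skip the eight q-register loads above when
	   the CIF has no floating-point arguments; q0-q7 need not be
	   touched at all for a purely integer call.  */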
1:
	/* Load the core argument passing registers, including
	   the structure return pointer.  */
	ldp	x0, x1, [sp, #16*N_V_ARG_REG + 0]
	ldp	x2, x3, [sp, #16*N_V_ARG_REG + 16]
	ldp	x4, x5, [sp, #16*N_V_ARG_REG + 32]
	ldp	x6, x7, [sp, #16*N_V_ARG_REG + 48]
	/* Deallocate the context, leaving the stacked arguments.  */
	add	sp, sp, #CALL_CONTEXT_SIZE

	blr	x9			/* call fn */
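	/* When fn is entered via the blr above, sp points at the stacked
	   arguments exactly as an AAPCS64 callee expects, and x0-x7/q0-q7
	   carry the register arguments; x9 still holds fn from the
	   prologue.  */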
	ldp	x3, x4, [x29, #16]	/* reload rvalue and flags */

	/* Partially deconstruct the stack frame.  */
	mov	sp, x29
	cfi_def_cfa_register (sp)
	ldp	x29, x30, [x29]
	/* Save the return value as directed.  */
	adr	x5, 0f
	and	w4, w4, #AARCH64_RET_MASK
	add	x5, x5, x4, lsl #3
	br	x5
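	/* Computed goto: the low bits of flags select one of 32 eight-byte
	   entries in the table below.  For example, a return code of 14
	   (D2) branches to 0f + 14*8 and executes "stp d0, d1, [x3]".  */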
	/* Note that each table entry is 2 insns, and thus 8 bytes.
	   For integer data, note that we're storing into ffi_arg
	   and therefore we want to extend to 64 bits; these types
	   have two consecutive entries allocated for them.  */
	.align	4
0:	ret				/* VOID */
	nop
1:	str	x0, [x3]		/* INT64 */
	ret
2:	stp	x0, x1, [x3]		/* INT128 */
	ret
3:	brk	#1000			/* UNUSED */
	ret
4:	brk	#1000			/* UNUSED */
	ret
5:	brk	#1000			/* UNUSED */
	ret
6:	brk	#1000			/* UNUSED */
	ret
7:	brk	#1000			/* UNUSED */
	ret
8:	st4	{ v0.s, v1.s, v2.s, v3.s }[0], [x3]	/* S4 */
	ret
9:	st3	{ v0.s, v1.s, v2.s }[0], [x3]	/* S3 */
	ret
10:	stp	s0, s1, [x3]		/* S2 */
	ret
11:	str	s0, [x3]		/* S1 */
	ret
12:	st4	{ v0.d, v1.d, v2.d, v3.d }[0], [x3]	/* D4 */
	ret
13:	st3	{ v0.d, v1.d, v2.d }[0], [x3]	/* D3 */
	ret
14:	stp	d0, d1, [x3]		/* D2 */
	ret
15:	str	d0, [x3]		/* D1 */
	ret
16:	str	q3, [x3, #48]		/* Q4 */
	nop
17:	str	q2, [x3, #32]		/* Q3 */
	nop
18:	stp	q0, q1, [x3]		/* Q2 */
	nop
19:	str	q0, [x3]		/* Q1 */
	ret
20:	uxtb	w0, w0			/* UINT8 */
	str	x0, [x3]
21:	ret				/* reserved */
	nop
22:	uxth	w0, w0			/* UINT16 */
	str	x0, [x3]
23:	ret				/* reserved */
	nop
24:	mov	w0, w0			/* UINT32 */
	str	x0, [x3]
25:	ret				/* reserved */
	nop
26:	sxtb	x0, w0			/* SINT8 */
	str	x0, [x3]
27:	ret				/* reserved */
	nop
28:	sxth	x0, w0			/* SINT16 */
	str	x0, [x3]
29:	ret				/* reserved */
	nop
30:	sxtw	x0, w0			/* SINT32 */
	str	x0, [x3]
31:	ret				/* reserved */
	nop
	cfi_endproc

	.globl	CNAME(ffi_call_SYSV)
	.type	CNAME(ffi_call_SYSV), #function
	.hidden	CNAME(ffi_call_SYSV)
	.size	CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV)
/* ffi_closure_SYSV

   Closure invocation glue.  This is the low-level code invoked directly
   by the closure trampoline to set up and call a closure.

   On entry x17 points to a struct ffi_closure and x16 has been
   clobbered; all other registers are preserved.

   We allocate a call context and save the argument-passing registers,
   then invoke the generic C ffi_closure_SYSV_inner() function to do all
   the real work; on return we load the result-passing registers back
   from the call context.  */
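/* For reference, the register assignments below imply a C helper of
   roughly this shape (a sketch only; the authoritative prototype lives
   in the aarch64 ffi.c):

	extern int ffi_closure_SYSV_inner (ffi_cif *cif,
					   void (*fun)(ffi_cif*, void*,
						       void**, void*),
					   void *user_data,
					   void *context,     -- saved q0-q7/x0-x7
					   void *stack,       -- caller's sp
					   void *rvalue,      -- scratch return slot
					   void *struct_rval);
*/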
#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64)
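/* Frame size breakdown, reading the expression above: 16 bytes to save
   x29/x30, CALL_CONTEXT_SIZE bytes for the saved argument registers
   (q0-q7 then x0-x7), and 64 bytes of return-value scratch -- enough for
   the largest case, four 16-byte vector registers (Q4).  */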
	.align	4
CNAME(ffi_closure_SYSV_V):
	cfi_startproc
	stp	x29, x30, [sp, #-ffi_closure_SYSV_FS]!
	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
	cfi_rel_offset (x29, 0)
	cfi_rel_offset (x30, 8)

	/* Save the argument passing vector registers.  */
	stp	q0, q1, [sp, #16 + 0]
	stp	q2, q3, [sp, #16 + 32]
	stp	q4, q5, [sp, #16 + 64]
	stp	q6, q7, [sp, #16 + 96]
	b	0f
	cfi_endproc
	.globl	CNAME(ffi_closure_SYSV_V)
	.type	CNAME(ffi_closure_SYSV_V), #function
	.hidden	CNAME(ffi_closure_SYSV_V)
	.size	CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V)
	.align	4
	cfi_startproc
CNAME(ffi_closure_SYSV):
	stp	x29, x30, [sp, #-ffi_closure_SYSV_FS]!
	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
	cfi_rel_offset (x29, 0)
	cfi_rel_offset (x30, 8)
0:
	mov	x29, sp
	/* Save the argument passing core registers.  */
	stp	x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
	stp	x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
	stp	x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
	stp	x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
	/* Load ffi_closure_inner arguments.  */
	ldp	PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET]	/* load cif, fn */
	ldr	PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2]	/* load user_data */
.Ldo_closure:
	add	x3, sp, #16				/* load context */
	add	x4, sp, #ffi_closure_SYSV_FS		/* load stack */
	add	x5, sp, #16+CALL_CONTEXT_SIZE		/* load rvalue */
	mov	x6, x8					/* load struct_rval */
	bl	CNAME(ffi_closure_SYSV_inner)
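	/* ffi_closure_SYSV_inner marshals the saved registers into the
	   user callback's argument array and runs the callback; its
	   integer return is the AARCH64_RET_* code that selects how to
	   load the result from the rvalue scratch area below.  */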
	/* Load the return value as directed.  */
	adr	x1, 0f
	and	w0, w0, #AARCH64_RET_MASK
	add	x1, x1, x0, lsl #3
	add	x3, sp, #16+CALL_CONTEXT_SIZE
	br	x1
	/* Note that each table entry is 2 insns, and thus 8 bytes.  */
	.align	4
0:	b	99f				/* VOID */
	nop
1:	ldr	x0, [x3]			/* INT64 */
	b	99f
2:	ldp	x0, x1, [x3]			/* INT128 */
	b	99f
3:	brk	#1000				/* UNUSED */
	nop
4:	brk	#1000				/* UNUSED */
	nop
5:	brk	#1000				/* UNUSED */
	nop
6:	brk	#1000				/* UNUSED */
	nop
7:	brk	#1000				/* UNUSED */
	nop
8:	ldr	s3, [x3, #12]			/* S4 */
	nop
9:	ldr	s2, [x3, #8]			/* S3 */
	nop
10:	ldp	s0, s1, [x3]			/* S2 */
	b	99f
11:	ldr	s0, [x3]			/* S1 */
	b	99f
12:	ldr	d3, [x3, #24]			/* D4 */
	nop
13:	ldr	d2, [x3, #16]			/* D3 */
	nop
14:	ldp	d0, d1, [x3]			/* D2 */
	b	99f
15:	ldr	d0, [x3]			/* D1 */
	b	99f
16:	ldr	q3, [x3, #48]			/* Q4 */
	nop
17:	ldr	q2, [x3, #32]			/* Q3 */
	nop
18:	ldp	q0, q1, [x3]			/* Q2 */
	b	99f
19:	ldr	q0, [x3]			/* Q1 */
	b	99f
20:	ldrb	w0, [x3, #BE(7)]		/* UINT8 */
	b	99f
21:	brk	#1000				/* reserved */
	nop
22:	ldrh	w0, [x3, #BE(6)]		/* UINT16 */
	b	99f
23:	brk	#1000				/* reserved */
	nop
24:	ldr	w0, [x3, #BE(4)]		/* UINT32 */
	b	99f
25:	brk	#1000				/* reserved */
	nop
26:	ldrsb	x0, [x3, #BE(7)]		/* SINT8 */
	b	99f
27:	brk	#1000				/* reserved */
	nop
28:	ldrsh	x0, [x3, #BE(6)]		/* SINT16 */
	b	99f
29:	brk	#1000				/* reserved */
	nop
30:	ldrsw	x0, [x3, #BE(4)]		/* SINT32 */
	b	99f
99:	ldp	x29, x30, [sp], #ffi_closure_SYSV_FS
	cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS)
	cfi_restore (x29)
	cfi_restore (x30)
	ret
	cfi_endproc
	.globl	CNAME(ffi_closure_SYSV)
	.type	CNAME(ffi_closure_SYSV), #function
	.hidden	CNAME(ffi_closure_SYSV)
	.size	CNAME(ffi_closure_SYSV), . - CNAME(ffi_closure_SYSV)
#if FFI_EXEC_TRAMPOLINE_TABLE

	.align	14
CNAME(ffi_closure_trampoline_table_page):
	.rept	16384 / FFI_TRAMPOLINE_SIZE
	/* Each 16-byte trampoline pairs with a 16-byte config entry on
	   the page immediately below: load the closure pointer into x17
	   and the entry point into x16, as ffi_closure_SYSV expects.  */
	adr	x16, -16384
	ldp	x17, x16, [x16]
	br	x16
	nop
	.endr

	.globl	CNAME(ffi_closure_trampoline_table_page)
	.type	CNAME(ffi_closure_trampoline_table_page), #function
	.hidden	CNAME(ffi_closure_trampoline_table_page)
	.size	CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page)

#endif /* FFI_EXEC_TRAMPOLINE_TABLE */
#ifdef FFI_GO_CLOSURES

	.align	4
CNAME(ffi_go_closure_SYSV_V):
	cfi_startproc
	stp	x29, x30, [sp, #-ffi_closure_SYSV_FS]!
	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
	cfi_rel_offset (x29, 0)
	cfi_rel_offset (x30, 8)
	/* Save the argument passing vector registers.  */
	stp	q0, q1, [sp, #16 + 0]
	stp	q2, q3, [sp, #16 + 32]
	stp	q4, q5, [sp, #16 + 64]
	stp	q6, q7, [sp, #16 + 96]
	b	0f
	cfi_endproc
	.globl	CNAME(ffi_go_closure_SYSV_V)
	.type	CNAME(ffi_go_closure_SYSV_V), #function
	.hidden	CNAME(ffi_go_closure_SYSV_V)
	.size	CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V)
	.align	4
	cfi_startproc
CNAME(ffi_go_closure_SYSV):
	stp	x29, x30, [sp, #-ffi_closure_SYSV_FS]!
	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
	cfi_rel_offset (x29, 0)
	cfi_rel_offset (x30, 8)
0:
	mov	x29, sp
	/* Save the argument passing core registers.  */
	stp	x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
	stp	x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
	stp	x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
	stp	x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
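	/* An ffi_go_closure begins with the trampoline pointer, followed
	   by cif and fun, so the ldp below picks up both with a single
	   load at offset PTR_SIZE; x18 (the static chain installed by
	   ffi_call_SYSV) doubles as the user_data argument.  */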
	/* Load ffi_closure_inner arguments.  */
	ldp	PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]	/* load cif, fn */
	mov	x2, x18						/* load user_data */
	b	.Ldo_closure
	cfi_endproc
	.globl	CNAME(ffi_go_closure_SYSV)
	.type	CNAME(ffi_go_closure_SYSV), #function
	.hidden	CNAME(ffi_go_closure_SYSV)
	.size	CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV)

#endif /* FFI_GO_CLOSURES */
#if defined __ELF__ && defined __linux__
	.section .note.GNU-stack,"",%progbits
#endif