/* PLT trampolines.  x86-64 version.
   Copyright (C) 2009-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
        .section SECTION(.text),"ax",@progbits
#ifdef _dl_runtime_resolve

# undef REGISTER_SAVE_AREA
# undef LOCAL_STORAGE_AREA

# if (STATE_SAVE_ALIGNMENT % 16) != 0
#  error STATE_SAVE_ALIGNMENT must be a multiple of 16

# if (STATE_SAVE_OFFSET % STATE_SAVE_ALIGNMENT) != 0
#  error STATE_SAVE_OFFSET must be a multiple of STATE_SAVE_ALIGNMENT
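        /* Aside, an assumption based on the x86 sysdep definitions rather
           than on this file: STATE_SAVE_OFFSET leaves room below the
           fxsave/xsave area for the REGISTER_SAVE_RAX ... REGISTER_SAVE_R9
           integer-register slots used further down, which is why it has to
           preserve the state area's STATE_SAVE_ALIGNMENT.  */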
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
/* Local stack area before jumping to function address: RBX.  */
#  define LOCAL_STORAGE_AREA	8
/* Use fxsave to save XMM registers.  */
#  define REGISTER_SAVE_AREA	(512 + STATE_SAVE_OFFSET)
#  if (REGISTER_SAVE_AREA % 16) != 0
#   error REGISTER_SAVE_AREA must be a multiple of 16
#  error USE_FXSAVE must be defined
/* Use fxsave to save XMM registers.  */
#  define REGISTER_SAVE_AREA	(512 + STATE_SAVE_OFFSET + 8)
/* Local stack area before jumping to function address: All saved
   registers.  */
#  define LOCAL_STORAGE_AREA	REGISTER_SAVE_AREA
#  if (REGISTER_SAVE_AREA % 16) != 8
#   error REGISTER_SAVE_AREA must be an odd multiple of 8
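        /* Aside, assuming the standard SysV x86-64 calling convention: on
           entry %rsp is 8 (mod 16) -- the caller's 16-byte alignment minus
           the pushed return address -- and the PLT's two 8-byte pushes keep
           that residue.  fxsave needs a 16-byte-aligned 512-byte buffer, so
           in the branch that does not realign the stack REGISTER_SAVE_AREA
           has to be an odd multiple of 8 to bring the save area back onto a
           16-byte boundary, which is what the check above enforces.  */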
        .globl _dl_runtime_resolve
        .hidden _dl_runtime_resolve
        .type _dl_runtime_resolve, @function
        cfi_adjust_cfa_offset(16)       # Incorporate PLT
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
#  if LOCAL_STORAGE_AREA != 8
#   error LOCAL_STORAGE_AREA must be 8
        pushq %rbx                      # push subtracts stack by 8.
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%rbx, 0)
        cfi_def_cfa_register(%rbx)
        and $-STATE_SAVE_ALIGNMENT, %RSP_LP
# ifdef REGISTER_SAVE_AREA
        sub $REGISTER_SAVE_AREA, %RSP_LP
#  if !DL_RUNTIME_RESOLVE_REALIGN_STACK
        cfi_adjust_cfa_offset(REGISTER_SAVE_AREA)
        # Allocate stack space of the required size to save the state.
        sub _rtld_local_ro+RTLD_GLOBAL_RO_DL_X86_CPU_FEATURES_OFFSET+XSAVE_STATE_SIZE_OFFSET(%rip), %RSP_LP
        sub _dl_x86_cpu_features+XSAVE_STATE_SIZE_OFFSET(%rip), %RSP_LP
        # Preserve registers otherwise clobbered.
        movq %rax, REGISTER_SAVE_RAX(%rsp)
        movq %rcx, REGISTER_SAVE_RCX(%rsp)
        movq %rdx, REGISTER_SAVE_RDX(%rsp)
        movq %rsi, REGISTER_SAVE_RSI(%rsp)
        movq %rdi, REGISTER_SAVE_RDI(%rsp)
        movq %r8, REGISTER_SAVE_R8(%rsp)
        movq %r9, REGISTER_SAVE_R9(%rsp)
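        # Aside: %rdi, %rsi, %rdx, %rcx, %r8 and %r9 are the six integer
        # argument registers of the SysV x86-64 ABI, and %rax carries the
        # number of vector registers used by a variadic call, so these are
        # exactly the registers the resolver must hand back unchanged to
        # the target function.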
        fxsave STATE_SAVE_OFFSET(%rsp)
        movl $STATE_SAVE_MASK, %eax
        # Clear the XSAVE Header.
        movq %rdx, (STATE_SAVE_OFFSET + 512)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 2)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 3)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 4)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 5)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 6)(%rsp)
        movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 7)(%rsp)
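        /* Background sketch, assuming the usual XSAVE semantics: the 64-byte
           XSAVE header that follows the 512-byte legacy region must be
           zeroed before the buffer is first used, because XSAVE/XSAVEC only
           write the state components selected by the mask in %edx:%eax and
           stale bytes in the header could later be misread (or faulted on)
           by XRSTOR.  STATE_SAVE_MASK is assumed to select the components
           (x87/SSE/AVX/...) that can carry function arguments.  */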
        xsave STATE_SAVE_OFFSET(%rsp)
        xsavec STATE_SAVE_OFFSET(%rsp)
        # Copy args pushed by PLT in register.
        # %rdi: link_map, %rsi: reloc_index
        mov (LOCAL_STORAGE_AREA + 8)(%BASE), %RSI_LP
        mov LOCAL_STORAGE_AREA(%BASE), %RDI_LP
        call _dl_fixup          # Call resolver.
        mov %RAX_LP, %R11_LP    # Save return value
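        /* For reference -- a sketch, not part of the original source: the
           resolver called above roughly corresponds to the C declaration

             ElfW(Addr) _dl_fixup (struct link_map *l, ElfW(Word) reloc_arg);

           (the exact return type is DL_FIXUP_VALUE_TYPE).  It resolves the
           relocation and returns the target address, which is kept in %r11
           until the registers have been restored.  */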
        # Get register content back.
        fxrstor STATE_SAVE_OFFSET(%rsp)
        movl $STATE_SAVE_MASK, %eax
        xrstor STATE_SAVE_OFFSET(%rsp)
        movq REGISTER_SAVE_R9(%rsp), %r9
        movq REGISTER_SAVE_R8(%rsp), %r8
        movq REGISTER_SAVE_RDI(%rsp), %rdi
        movq REGISTER_SAVE_RSI(%rsp), %rsi
        movq REGISTER_SAVE_RDX(%rsp), %rdx
        movq REGISTER_SAVE_RCX(%rsp), %rcx
        movq REGISTER_SAVE_RAX(%rsp), %rax
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
        cfi_def_cfa_register(%rsp)
        # Adjust stack (PLT did 2 pushes).
        add $(LOCAL_STORAGE_AREA + 16), %RSP_LP
        cfi_adjust_cfa_offset(-(LOCAL_STORAGE_AREA + 16))
        jmp *%r11               # Jump to function address.
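        # Aside, following the standard lazy-binding PLT sequence: the 16
        # above accounts for the two 8-byte words pushed before reaching
        # this trampoline -- the relocation index pushed by the individual
        # PLT slot and the link_map pointer (GOT[1]) pushed by PLT0 --
        # while LOCAL_STORAGE_AREA covers the trampoline's own frame.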
        .size _dl_runtime_resolve, .-_dl_runtime_resolve
#if !defined PROF && defined _dl_runtime_profile
# if (LR_VECTOR_OFFSET % VEC_SIZE) != 0
#  error LR_VECTOR_OFFSET must be a multiple of VEC_SIZE

        .globl _dl_runtime_profile
        .hidden _dl_runtime_profile
        .type _dl_runtime_profile, @function
        cfi_adjust_cfa_offset(16)       # Incorporate PLT
        /* The La_x86_64_regs data structure pointed to by the
           fourth parameter must be VEC_SIZE-byte aligned.  This must
           be explicitly enforced.  We have to set up a dynamically
           sized stack frame.  %rbx points to the top half which
           has a fixed size and preserves the original stack pointer.  */

        sub $32, %RSP_LP        # Allocate the local storage.
        cfi_adjust_cfa_offset(32)
        cfi_rel_offset(%rbx, 0)

        /* On the stack:
                56(%rbx)        parameter #1
                48(%rbx)        return address

                24(%rbx)        La_x86_64_regs pointer
        */

        cfi_def_cfa_register(%rbx)
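        /* For orientation -- a sketch, not part of the original source: the
           La_x86_64_regs structure filled in below is declared in
           sysdeps/x86_64/bits/link.h roughly as

             typedef struct La_x86_64_regs
             {
               uint64_t lr_rdx, lr_r8, lr_r9, lr_rcx, lr_rsi, lr_rdi;
               uint64_t lr_rbp, lr_rsp;
               La_x86_64_xmm lr_xmm[8];
               La_x86_64_vector lr_vector[8];
               __int128_t lr_bnd[4];
             } La_x86_64_regs;

           so the LR_*_OFFSET constants used here are the offsets of those
           members.  */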
        /* Actively align the La_x86_64_regs structure.  */
        and $-VEC_SIZE, %RSP_LP
        /* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
           to detect if any xmm0-xmm7 registers are changed by audit
           module.  */
        sub $(LR_SIZE + XMM_SIZE*8), %RSP_LP
        /* Fill the La_x86_64_regs structure.  */
        movq %rdx, LR_RDX_OFFSET(%rsp)
        movq %r8,  LR_R8_OFFSET(%rsp)
        movq %r9,  LR_R9_OFFSET(%rsp)
        movq %rcx, LR_RCX_OFFSET(%rsp)
        movq %rsi, LR_RSI_OFFSET(%rsp)
        movq %rdi, LR_RDI_OFFSET(%rsp)
        movq %rbp, LR_RBP_OFFSET(%rsp)

        lea 48(%rbx), %RAX_LP
        movq %rax, LR_RSP_OFFSET(%rsp)
        /* We always store the XMM registers even if AVX is available.
           This is to provide backward binary compatibility for existing
           audit modules.  */
        VMOVA %xmm0, (LR_XMM_OFFSET + XMM_SIZE*0)(%rsp)
        VMOVA %xmm1, (LR_XMM_OFFSET + XMM_SIZE*1)(%rsp)
        VMOVA %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
        VMOVA %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
        VMOVA %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
        VMOVA %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
        VMOVA %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
        VMOVA %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
        /* This is to support AVX audit modules.  */
        VMOVA %VEC(0), (LR_VECTOR_OFFSET + VECTOR_SIZE*0)(%rsp)
        VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE*1)(%rsp)
        VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
        VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
        VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
        VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
        VMOVA %VEC(6), (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
        VMOVA %VEC(7), (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
        /* Save xmm0-xmm7 registers to detect if any of them are
           changed by audit module.  */
        vmovdqa %xmm0, (LR_SIZE + XMM_SIZE*0)(%rsp)
        vmovdqa %xmm1, (LR_SIZE + XMM_SIZE*1)(%rsp)
        vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
        vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
        vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
        vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
        vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
        vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
        mov %RSP_LP, %RCX_LP    # La_x86_64_regs pointer to %rcx.
        mov 48(%rbx), %RDX_LP   # Load return address if needed.
        mov 40(%rbx), %RSI_LP   # Copy args pushed by PLT in register.
        mov 32(%rbx), %RDI_LP   # %rdi: link_map, %rsi: reloc_index
        lea 16(%rbx), %R8_LP    # Address of framesize
        call _dl_profile_fixup  # Call resolver.

        mov %RAX_LP, %R11_LP    # Save return value.
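        /* For reference -- a sketch, not part of the original source: the
           call above roughly corresponds to the C declaration

             ElfW(Addr) _dl_profile_fixup (struct link_map *l,
                                           ElfW(Word) reloc_arg,
                                           ElfW(Addr) retaddr,
                                           void *regs,
                                           long int *framesizep);

           (the exact return type is DL_FIXUP_VALUE_TYPE).  It runs the
           audit modules' la_pltenter hooks with the register dump in *regs
           and stores the frame size requested for la_pltexit through
           *framesizep.  */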
        movq 8(%rbx), %rax      # Get back register content.
        movq LR_RDX_OFFSET(%rsp), %rdx
        movq LR_R8_OFFSET(%rsp), %r8
        movq LR_R9_OFFSET(%rsp), %r9

        VMOVA (LR_XMM_OFFSET + XMM_SIZE*0)(%rsp), %xmm0
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*1)(%rsp), %xmm1
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
        VMOVA (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
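        /* Background sketch for the blocks below, not an exact transcript
           of the elided branches: each saved copy at LR_SIZE + XMM_SIZE*n
           is compared byte-for-byte against the current %xmmN.  If the
           audit module left the register untouched, the full-width copy in
           lr_vector[n] is reloaded into %VEC(N) so any upper vector bits
           survive; if the module changed it, the modified %xmmN value is
           propagated into lr_vector[n]/lr_xmm[n] instead.  */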
        /* Check if any xmm0-xmm7 registers are changed by audit
           module.  */
        vpcmpeqb (LR_SIZE)(%rsp), %xmm0, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
        vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %VEC(1)
        vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %VEC(2)
        vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %VEC(3)
        vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %VEC(4)
        vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %VEC(5)
        vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %VEC(6)
        vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)

1:      vpcmpeqb (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
        vpmovmskb %xmm8, %esi
        vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
2:      VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %VEC(7)
        vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
        mov 16(%rbx), %RCX_LP   # Anything in framesize?
        test %RCX_LP, %RCX_LP

        /* There's nothing in the frame size, so there
           will be no call to _dl_audit_pltexit.  */
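        /* Aside, assuming the conventional contract between this trampoline
           and _dl_profile_fixup: the framesize slot at 16(%rbx) is left
           negative unless some audit module's la_pltenter hook requested a
           non-negative frame size (and thus a la_pltexit call), so the test
           above decides between the plain tail jump and the pltexit path
           further below.  */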
        /* Get back registers content.  */
        movq LR_RCX_OFFSET(%rsp), %rcx
        movq LR_RSI_OFFSET(%rsp), %rsi
        movq LR_RDI_OFFSET(%rsp), %rdi

        cfi_def_cfa_register(%rsp)

        add $48, %RSP_LP        # Adjust the stack to the return value
                                # (eats the reloc index and link_map).
        cfi_adjust_cfa_offset(-48)
        jmp *%r11               # Jump to function address.

        cfi_adjust_cfa_offset(48)
        cfi_rel_offset(%rbx, 0)
        cfi_def_cfa_register(%rbx)

        /* At this point we need to prepare a new stack for the function
           which has to be called.  We copy the original stack to a
           temporary buffer of the size specified by the 'framesize'
           returned from _dl_profile_fixup.  */
        lea LR_RSP_OFFSET(%rbx), %RSI_LP        # stack

        movq 24(%rdi), %rcx     # Get back register content.

        mov 24(%rbx), %RSP_LP   # Drop the copied stack content.
        /* Now we have to prepare the La_x86_64_retval structure for
           _dl_audit_pltexit.  %rsp now points to the La_x86_64_regs
           structure, so we just need to allocate sizeof(La_x86_64_retval)
           bytes on the stack, since the alignment has already been taken
           care of.  */

        /* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
           registers to detect if xmm0/xmm1 registers are changed
           by audit module.  Since %rsp is aligned to VEC_SIZE, we
           need to make sure that the address of La_x86_64_retval +
           LRV_VECTOR0_OFFSET is aligned to VEC_SIZE.  */
# define LRV_SPACE	(LRV_SIZE + XMM_SIZE*2)
# define LRV_MISALIGNED	((LRV_SIZE + LRV_VECTOR0_OFFSET) & (VEC_SIZE - 1))
# if LRV_MISALIGNED == 0
        sub $LRV_SPACE, %RSP_LP
        sub $(LRV_SPACE + VEC_SIZE - LRV_MISALIGNED), %RSP_LP

        sub $LRV_SIZE, %RSP_LP  # sizeof(La_x86_64_retval)

        mov %RSP_LP, %RCX_LP    # La_x86_64_retval argument to %rcx.
        /* Fill in the La_x86_64_retval structure.  */
        movq %rax, LRV_RAX_OFFSET(%rcx)
        movq %rdx, LRV_RDX_OFFSET(%rcx)

        VMOVA %xmm0, LRV_XMM0_OFFSET(%rcx)
        VMOVA %xmm1, LRV_XMM1_OFFSET(%rcx)

        /* This is to support AVX audit modules.  */
        VMOVA %VEC(0), LRV_VECTOR0_OFFSET(%rcx)
        VMOVA %VEC(1), LRV_VECTOR1_OFFSET(%rcx)

        /* Save xmm0/xmm1 registers to detect if they are changed
           by audit module.  */
        vmovdqa %xmm0, (LRV_SIZE + XMM_SIZE*0)(%rcx)
        vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE*1)(%rcx)

        fstpt LRV_ST0_OFFSET(%rcx)
        fstpt LRV_ST1_OFFSET(%rcx)
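        /* For orientation -- a sketch, not part of the original source: the
           La_x86_64_retval structure filled in above is declared in
           sysdeps/x86_64/bits/link.h roughly as

             typedef struct La_x86_64_retval
             {
               uint64_t lrv_rax;
               uint64_t lrv_rdx;
               La_x86_64_xmm lrv_xmm0;
               La_x86_64_xmm lrv_xmm1;
               long double lrv_st0;
               long double lrv_st1;
               La_x86_64_vector lrv_vector0;
               La_x86_64_vector lrv_vector1;
               __int128_t lrv_bnd0;
               __int128_t lrv_bnd1;
             } La_x86_64_retval;

           so the LRV_*_OFFSET constants are the offsets of those members.  */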
        movq 24(%rbx), %rdx     # La_x86_64_regs argument to %rdx.
        movq 40(%rbx), %rsi     # Copy args pushed by PLT in register.
        movq 32(%rbx), %rdi     # %rdi: link_map, %rsi: reloc_index
        call _dl_audit_pltexit
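        /* For reference -- a sketch, not part of the original source: the
           call above roughly corresponds to the C declaration

             void _dl_audit_pltexit (struct link_map *l, ElfW(Word) reloc_arg,
                                     const void *inregs, void *outregs);

           It runs the audit modules' la_pltexit hooks with the register
           dump in *inregs and lets them modify the return value recorded
           in *outregs.  */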
        /* Restore return registers.  */
        movq LRV_RAX_OFFSET(%rsp), %rax
        movq LRV_RDX_OFFSET(%rsp), %rdx

        VMOVA LRV_XMM0_OFFSET(%rsp), %xmm0
        VMOVA LRV_XMM1_OFFSET(%rsp), %xmm1

        /* Check if xmm0/xmm1 registers are changed by audit module.  */
        vpcmpeqb (LRV_SIZE)(%rsp), %xmm0, %xmm2
        vpmovmskb %xmm2, %esi
        VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)

1:      vpcmpeqb (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
        vpmovmskb %xmm2, %esi
        VMOVA LRV_VECTOR1_OFFSET(%rsp), %VEC(1)

        fldt LRV_ST1_OFFSET(%rsp)
        fldt LRV_ST0_OFFSET(%rsp)

        cfi_def_cfa_register(%rsp)

        add $48, %RSP_LP        # Adjust the stack to the return value
                                # (eats the reloc index and link_map).
        cfi_adjust_cfa_offset(-48)

        .size _dl_runtime_profile, .-_dl_runtime_profile