/* PLT trampolines.  x86-64 version.
   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
	.section SECTION(.text),"ax",@progbits
#ifdef _dl_runtime_resolve

# undef REGISTER_SAVE_AREA
# undef LOCAL_STORAGE_AREA
# undef BASE

# include "dl-trampoline-state.h"
	.globl _dl_runtime_resolve
	.hidden _dl_runtime_resolve
	.type _dl_runtime_resolve, @function
	.align 16
	cfi_startproc
_dl_runtime_resolve:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
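
	/* How we get here (a sketch of the standard x86-64 lazy-binding
	   PLT, for orientation; these stubs live in the PLT, not in this
	   file):

		.PLTn:	jmp *GOTn(%rip)		# initially falls through
			pushq $reloc_index	# -> 8(%rsp)
			jmp .PLT0
		.PLT0:	pushq GOT+8(%rip)	# struct link_map * -> (%rsp)
			jmp *GOT+16(%rip)	# this trampoline

	   Hence the two 8-byte slots the CFA adjustment above accounts
	   for: the link_map pointer and the relocation index.  */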
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
#  if LOCAL_STORAGE_AREA != 8
#   error LOCAL_STORAGE_AREA must be 8
#  endif
	pushq %rbx			# push subtracts stack by 8.
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rbx, 0)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)
	and $-STATE_SAVE_ALIGNMENT, %RSP_LP
# endif
# ifdef REGISTER_SAVE_AREA
	sub $REGISTER_SAVE_AREA, %RSP_LP
#  if !DL_RUNTIME_RESOLVE_REALIGN_STACK
	cfi_adjust_cfa_offset(REGISTER_SAVE_AREA)
#  endif
# else
	# Allocate stack space of the required size to save the state.
#  if IS_IN (rtld)
	sub _rtld_local_ro+RTLD_GLOBAL_RO_DL_X86_CPU_FEATURES_OFFSET+XSAVE_STATE_SIZE_OFFSET(%rip), %RSP_LP
#  else
	sub _dl_x86_cpu_features+XSAVE_STATE_SIZE_OFFSET(%rip), %RSP_LP
#  endif
# endif
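
	/* Roughly speaking, XSAVE_STATE_SIZE_OFFSET is the offset of the
	   xsave_state_size member of ld.so's struct cpu_features, which
	   was computed from CPUID at startup, so the buffer allocated
	   here matches whatever xstate components the CPU and kernel
	   actually enabled.  The two symbols above only differ in how
	   that structure is reached from inside ld.so versus outside.  */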
	# Preserve registers otherwise clobbered.
	movq %rax, REGISTER_SAVE_RAX(%rsp)
	movq %rcx, REGISTER_SAVE_RCX(%rsp)
	movq %rdx, REGISTER_SAVE_RDX(%rsp)
	movq %rsi, REGISTER_SAVE_RSI(%rsp)
	movq %rdi, REGISTER_SAVE_RDI(%rsp)
	movq %r8, REGISTER_SAVE_R8(%rsp)
	movq %r9, REGISTER_SAVE_R9(%rsp)
# ifdef USE_FXSAVE
	fxsave STATE_SAVE_OFFSET(%rsp)
# else
	movl $STATE_SAVE_MASK, %eax
	xorl %edx, %edx
	# Clear the XSAVE Header.
#  ifdef USE_XSAVE
	movq %rdx, (STATE_SAVE_OFFSET + 512)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8)(%rsp)
#  endif
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 2)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 3)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 4)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 5)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 6)(%rsp)
	movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 7)(%rsp)
#  ifdef USE_XSAVE
	xsave STATE_SAVE_OFFSET(%rsp)
#  else
	xsavec STATE_SAVE_OFFSET(%rsp)
#  endif
# endif
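
	/* Exactly one of the three save flavors is compiled in, selected
	   by the file that includes this one (one _dl_runtime_resolve
	   variant is built per flavor): FXSAVE writes the fixed 512-byte
	   legacy area; plain XSAVE additionally has the 64-byte header at
	   offset 512, fully zeroed above; XSAVEC writes the compacted
	   format and needs only the reserved header words cleared.
	   %edx:%eax form the requested-feature bitmask for xsave/xsavec.  */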
	# Copy args pushed by PLT in register.
	# %rdi: link_map, %rsi: reloc_index
	mov (LOCAL_STORAGE_AREA + 8)(%BASE), %RSI_LP
	mov LOCAL_STORAGE_AREA(%BASE), %RDI_LP
	call _dl_fixup		# Call resolver.
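
	/* For reference, the resolver called here is the C function from
	   elf/dl-runtime.c (attributes and any arch-specific leading
	   arguments omitted):

	     DL_FIXUP_VALUE_TYPE _dl_fixup (struct link_map *l,
					    ElfW(Word) reloc_arg);

	   It resolves the symbol, patches the GOT entry, and returns the
	   target address in %rax per the psABI; we stash it in %r11
	   below because %rax itself must be restored for the callee.  */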
	mov %RAX_LP, %R11_LP	# Save return value
	# Get register content back.
# ifdef USE_FXSAVE
	fxrstor STATE_SAVE_OFFSET(%rsp)
# else
	movl $STATE_SAVE_MASK, %eax
	xorl %edx, %edx
	xrstor STATE_SAVE_OFFSET(%rsp)
# endif
	movq REGISTER_SAVE_R9(%rsp), %r9
	movq REGISTER_SAVE_R8(%rsp), %r8
	movq REGISTER_SAVE_RDI(%rsp), %rdi
	movq REGISTER_SAVE_RSI(%rsp), %rsi
	movq REGISTER_SAVE_RDX(%rsp), %rdx
	movq REGISTER_SAVE_RCX(%rsp), %rcx
	movq REGISTER_SAVE_RAX(%rsp), %rax
# if DL_RUNTIME_RESOLVE_REALIGN_STACK
	mov %RBX_LP, %RSP_LP
	cfi_def_cfa_register(%rsp)
	movq (%rsp), %rbx
	cfi_restore(%rbx)
# endif
	# Adjust stack (PLT did 2 pushes).
	add $(LOCAL_STORAGE_AREA + 16), %RSP_LP
	cfi_adjust_cfa_offset(-(LOCAL_STORAGE_AREA + 16))
	jmp *%r11		# Jump to function address.
	cfi_endproc
	.size _dl_runtime_resolve, .-_dl_runtime_resolve
#endif
#if !defined PROF && defined _dl_runtime_profile
# if (LR_VECTOR_OFFSET % VEC_SIZE) != 0
#  error LR_VECTOR_OFFSET must be multiple of VEC_SIZE
# endif
	.globl _dl_runtime_profile
	.hidden _dl_runtime_profile
	.type _dl_runtime_profile, @function
	.align 16
	cfi_startproc
_dl_runtime_profile:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
	/* The La_x86_64_regs data structure pointed to by the
	   fourth parameter must be VEC_SIZE-byte aligned.  This must
	   be explicitly enforced.  We have to set up a dynamically
	   sized stack frame.  %rbx points to the top half which
	   has a fixed size and preserves the original stack pointer.  */
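
	/* The alignment matters because the structure is accessed with
	   VMOVA, an aligned vector store/load that faults on a misaligned
	   operand; we cannot trust the caller's stack alignment, so it is
	   enforced with the and/sub sequence below.  */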
	sub $32, %RSP_LP	# Allocate the local storage.
	cfi_adjust_cfa_offset(32)
	movq %rbx, (%rsp)
	cfi_rel_offset(%rbx, 0)

	/* On the stack:
		56(%rbx)	parameter #1
		48(%rbx)	return address

		40(%rbx)	reloc index
		32(%rbx)	link_map

		24(%rbx)	La_x86_64_regs pointer
		16(%rbx)	framesize
		8(%rbx)		rax
		(%rbx)		rbx
	*/

	movq %rax, 8(%rsp)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)
	/* Actively align the La_x86_64_regs structure.  */
	and $-VEC_SIZE, %RSP_LP
	/* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
	   to detect if any xmm0-xmm7 registers are changed by audit
	   module.  */
	sub $(LR_SIZE + XMM_SIZE*8), %RSP_LP
	movq %rsp, 24(%rbx)
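
	/* For orientation, the frame laid out here mirrors the audit ABI
	   type from sysdeps/x86_64/bits/link.h (roughly; member list
	   abridged):

	     typedef struct La_x86_64_regs
	     {
	       uint64_t lr_rdx, lr_r8, lr_r9, lr_rcx, lr_rsi, lr_rdi;
	       uint64_t lr_rbp, lr_rsp;
	       La_x86_64_xmm lr_xmm[8];
	       La_x86_64_vector lr_vector[8];
	       ...
	     } La_x86_64_regs;

	   The LR_*_OFFSET constants below are the byte offsets of these
	   members; the XMM_SIZE*8 bytes past LR_SIZE are private shadow
	   copies used for the change detection further down.  */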
	/* Fill the La_x86_64_regs structure.  */
	movq %rdx, LR_RDX_OFFSET(%rsp)
	movq %r8,  LR_R8_OFFSET(%rsp)
	movq %r9,  LR_R9_OFFSET(%rsp)
	movq %rcx, LR_RCX_OFFSET(%rsp)
	movq %rsi, LR_RSI_OFFSET(%rsp)
	movq %rdi, LR_RDI_OFFSET(%rsp)
	movq %rbp, LR_RBP_OFFSET(%rsp)

	lea 48(%rbx), %RAX_LP
	movq %rax, LR_RSP_OFFSET(%rsp)
	/* We always store the XMM registers even if AVX is available.
	   This is to provide backward binary compatibility for existing
	   audit modules.  */
	VMOVA %xmm0, (LR_XMM_OFFSET + XMM_SIZE*0)(%rsp)
	VMOVA %xmm1, (LR_XMM_OFFSET + XMM_SIZE*1)(%rsp)
	VMOVA %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
	VMOVA %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
	VMOVA %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
	VMOVA %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
	VMOVA %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
	VMOVA %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
# ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), (LR_VECTOR_OFFSET + VECTOR_SIZE*0)(%rsp)
	VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE*1)(%rsp)
	VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	VMOVA %VEC(6), (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	VMOVA %VEC(7), (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	/* Save xmm0-xmm7 registers to detect if any of them are
	   changed by audit module.  */
	vmovdqa %xmm0, (LR_SIZE + XMM_SIZE*0)(%rsp)
	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE*1)(%rsp)
	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
# endif
	mov %RSP_LP, %RCX_LP	# La_x86_64_regs pointer to %rcx.
	mov 48(%rbx), %RDX_LP	# Load return address if needed.
	mov 40(%rbx), %RSI_LP	# Copy args pushed by PLT in register.
	mov 32(%rbx), %RDI_LP	# %rdi: link_map, %rsi: reloc_index
	lea 16(%rbx), %R8_LP	# Address of framesize
	call _dl_profile_fixup	# Call resolver.
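
	/* For reference, the profiling resolver from elf/dl-runtime.c
	   (attributes omitted):

	     DL_FIXUP_VALUE_TYPE
	     _dl_profile_fixup (struct link_map *l, ElfW(Word) reloc_arg,
				ElfW(Addr) retaddr, void *regs,
				long int *framesizep);

	   regs points at the La_x86_64_regs just built; *framesizep is
	   filled in by any la_pltenter audit hooks and tested below.  */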
	mov %RAX_LP, %R11_LP	# Save return value.

	movq 8(%rbx), %rax	# Get back register content.
	movq LR_RDX_OFFSET(%rsp), %rdx
	movq  LR_R8_OFFSET(%rsp), %r8
	movq  LR_R9_OFFSET(%rsp), %r9
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*0)(%rsp), %xmm0
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*1)(%rsp), %xmm1
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
	VMOVA (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
# ifdef RESTORE_AVX
	/* Check if any xmm0-xmm7 registers are changed by audit
	   module.  */
	vpcmpeqb (LR_SIZE)(%rsp), %xmm0, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
	vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %VEC(1)
	vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %VEC(2)
	vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %VEC(3)
	vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %VEC(4)
	vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %VEC(5)
	vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %VEC(6)
	vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)

1:	vpcmpeqb (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
	vpmovmskb %xmm8, %esi
	incw %si
	je 2f
	vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %VEC(7)
	vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

1:
# endif
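
	/* The per-register idiom above: vpcmpeqb compares the live xmmN
	   byte-for-byte with the shadow copy saved before the fixup
	   call; vpmovmskb packs one bit per byte into %esi, so an
	   unmodified register yields 0xffff, and incw %si maps exactly
	   that value to zero.  The je (unchanged) path reloads the
	   full-width %VEC(N) saved earlier; the fall-through (changed)
	   path instead promotes the audit module's modified xmmN into
	   the vector slot.  */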
	mov 16(%rbx), %RCX_LP	# Anything in framesize?
	test %RCX_LP, %RCX_LP
	jns 3f
	/* There's nothing in the frame size, so there
	   will be no call to the _dl_audit_pltexit.  */
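
	/* By convention _dl_profile_fixup leaves a negative value in the
	   framesize slot when no audit module requested la_pltexit for
	   this symbol, so a simple sign test (the jns above) selects
	   between tail-jumping directly and the pltexit path at 3:.  */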
	/* Get back registers content.  */
	movq LR_RCX_OFFSET(%rsp), %rcx
	movq LR_RSI_OFFSET(%rsp), %rsi
	movq LR_RDI_OFFSET(%rsp), %rdi
	mov %RBX_LP, %RSP_LP
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	jmp *%r11		# Jump to function address.
3:
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)
	/* At this point we need to prepare a new stack for the function
	   which has to be called.  We copy the original stack to a
	   temporary buffer of the size specified by the 'framesize'
	   returned from _dl_profile_fixup.  */

	lea LR_RSP_OFFSET(%rbx), %RSI_LP	# stack
	add $8, %RCX_LP
	and $-16, %RCX_LP
	sub %RCX_LP, %RSP_LP
	mov %RSP_LP, %RDI_LP
	rep movsb
	movq 24(%rdi), %rcx	# Get back register content.
	movq 32(%rdi), %rsi
	movq 40(%rdi), %rdi

	call *%r11

	mov 24(%rbx), %RSP_LP	# Drop the copied stack content
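
	/* Copy arithmetic, for the record: %rcx arrives holding the
	   framesize; add $8/and $-16 rounds it so the callee still sees
	   a correctly aligned frame, %rsp drops by the same amount, and
	   rep movsb copies exactly that many bytes starting at 56(%rbx)
	   (the first stack parameter; LR_RSP_OFFSET happens to equal 56).
	   A handy invariant: when the copy finishes %rdi has advanced to
	   the old %rsp, i.e. to the La_x86_64_regs block, which is why
	   24/32/40(%rdi) above are LR_RCX/RSI/RDI_OFFSET in disguise.  */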
	/* Now we have to prepare the La_x86_64_retval structure for the
	   _dl_audit_pltexit.  The La_x86_64_regs is pointed to by %rsp now,
	   so we just need to allocate the sizeof(La_x86_64_retval) space on
	   the stack, since the alignment has already been taken care of.  */
# ifdef RESTORE_AVX
	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
	   registers to detect if xmm0/xmm1 registers are changed
	   by audit module.  Since rsp is aligned to VEC_SIZE, we
	   need to make sure that the address of La_x86_64_retval +
	   LRV_VECTOR0_OFFSET is aligned to VEC_SIZE.  */
#  define LRV_SPACE (LRV_SIZE + XMM_SIZE*2)
#  define LRV_MISALIGNED ((LRV_SIZE + LRV_VECTOR0_OFFSET) & (VEC_SIZE - 1))
#  if LRV_MISALIGNED == 0
	sub $LRV_SPACE, %RSP_LP
#  else
	sub $(LRV_SPACE + VEC_SIZE - LRV_MISALIGNED), %RSP_LP
#  endif
# else
	sub $LRV_SIZE, %RSP_LP	# sizeof(La_x86_64_retval)
# endif
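
	/* For orientation, roughly from sysdeps/x86_64/bits/link.h
	   (member list abridged):

	     typedef struct La_x86_64_retval
	     {
	       uint64_t lrv_rax, lrv_rdx;
	       La_x86_64_xmm lrv_xmm0, lrv_xmm1;
	       long double lrv_st0, lrv_st1;
	       La_x86_64_vector lrv_vector0, lrv_vector1;
	       ...
	     } La_x86_64_retval;

	   i.e. every register in which the psABI can return a value.  */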
	mov %RSP_LP, %RCX_LP	# La_x86_64_retval argument to %rcx.
	/* Fill in the La_x86_64_retval structure.  */
	movq %rax, LRV_RAX_OFFSET(%rcx)
	movq %rdx, LRV_RDX_OFFSET(%rcx)

	VMOVA %xmm0, LRV_XMM0_OFFSET(%rcx)
	VMOVA %xmm1, LRV_XMM1_OFFSET(%rcx)
# ifdef RESTORE_AVX
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), LRV_VECTOR0_OFFSET(%rcx)
	VMOVA %VEC(1), LRV_VECTOR1_OFFSET(%rcx)

	/* Save xmm0/xmm1 registers to detect if they are changed
	   by audit module.  */
	vmovdqa %xmm0, (LRV_SIZE + XMM_SIZE*0)(%rcx)
	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE*1)(%rcx)
# endif
	fstpt LRV_ST0_OFFSET(%rcx)
	fstpt LRV_ST1_OFFSET(%rcx)
	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	call _dl_audit_pltexit
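
	/* For reference, the audit exit hook driver (roughly as declared
	   in glibc's ldsodefs.h, attributes omitted):

	     void _dl_audit_pltexit (struct link_map *l,
				     ElfW(Word) reloc_arg,
				     const void *inregs, void *outregs);

	   inregs is the saved La_x86_64_regs, outregs the La_x86_64_retval
	   just filled; la_pltexit hooks may modify the return values in
	   place, which is why they are reloaded below.  */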
	/* Restore return registers.  */
	movq LRV_RAX_OFFSET(%rsp), %rax
	movq LRV_RDX_OFFSET(%rsp), %rdx

	VMOVA LRV_XMM0_OFFSET(%rsp), %xmm0
	VMOVA LRV_XMM1_OFFSET(%rsp), %xmm1
# ifdef RESTORE_AVX
	/* Check if xmm0/xmm1 registers are changed by audit module.  */
	vpcmpeqb (LRV_SIZE)(%rsp), %xmm0, %xmm2
	vpmovmskb %xmm2, %esi
	incw %si
	jne 1f
	VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)

1:	vpcmpeqb (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
	vpmovmskb %xmm2, %esi
	incw %si
	jne 1f
	VMOVA LRV_VECTOR1_OFFSET(%rsp), %VEC(1)

1:
# endif
	fldt LRV_ST1_OFFSET(%rsp)
	fldt LRV_ST0_OFFSET(%rsp)

	mov %RBX_LP, %RSP_LP
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)
	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	retq

	cfi_endproc
	.size _dl_runtime_profile, .-_dl_runtime_profile
#endif