/* PLT trampolines.  x86-64 version.
   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#undef REGISTER_SAVE_AREA_RAW
#ifdef __ILP32__
/* X32 saves RCX, RDX, RSI, RDI, R8 and R9 plus RAX as well as VEC0 to
   VEC7.  */
# define REGISTER_SAVE_AREA_RAW	(8 * 7 + VEC_SIZE * 8)
#else
/* X86-64 saves RCX, RDX, RSI, RDI, R8 and R9 plus RAX as well as
   BND0, BND1, BND2, BND3 and VEC0 to VEC7.  */
# define REGISTER_SAVE_AREA_RAW	(8 * 7 + 16 * 4 + VEC_SIZE * 8)
#endif
#undef REGISTER_SAVE_AREA
#undef LOCAL_STORAGE_AREA
#undef BASE
#if DL_RUNIME_RESOLVE_REALIGN_STACK
# define REGISTER_SAVE_AREA	(REGISTER_SAVE_AREA_RAW + 8)
/* Local stack area before jumping to function address: RBX.  */
# define LOCAL_STORAGE_AREA	8
# define BASE			rbx
# if (REGISTER_SAVE_AREA % VEC_SIZE) != 0
#  error REGISTER_SAVE_AREA must be a multiple of VEC_SIZE
# endif
#else
# define REGISTER_SAVE_AREA	REGISTER_SAVE_AREA_RAW
/* Local stack area before jumping to function address: all saved
   registers.  */
# define LOCAL_STORAGE_AREA	REGISTER_SAVE_AREA
# define BASE			rsp
# if (REGISTER_SAVE_AREA % 16) != 8
#  error REGISTER_SAVE_AREA must be an odd multiple of 8
# endif
#endif
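
/* On entry from a PLT entry, the return address plus the two words
   pushed by the PLT leave %rsp at 8 mod 16.  Subtracting an odd
   multiple of 8 therefore restores 16-byte alignment when the stack
   is not explicitly realigned; with realignment the save area only
   needs to be a multiple of VEC_SIZE.  */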
	.text
	.globl _dl_runtime_resolve
	.hidden _dl_runtime_resolve
	.type _dl_runtime_resolve, @function
	.align 16
	cfi_startproc
_dl_runtime_resolve:
	cfi_adjust_cfa_offset(16)	# Incorporate PLT
#if DL_RUNIME_RESOLVE_REALIGN_STACK
# if LOCAL_STORAGE_AREA != 8
#  error LOCAL_STORAGE_AREA must be 8
# endif
	pushq %rbx			# push subtracts stack by 8.
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rbx, 0)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)
	and $-VEC_SIZE, %RSP_LP
#endif
	sub $REGISTER_SAVE_AREA, %RSP_LP
	cfi_adjust_cfa_offset(REGISTER_SAVE_AREA)
	# Preserve registers otherwise clobbered.
	movq %rax, REGISTER_SAVE_RAX(%rsp)
	movq %rcx, REGISTER_SAVE_RCX(%rsp)
	movq %rdx, REGISTER_SAVE_RDX(%rsp)
	movq %rsi, REGISTER_SAVE_RSI(%rsp)
	movq %rdi, REGISTER_SAVE_RDI(%rsp)
	movq %r8, REGISTER_SAVE_R8(%rsp)
	movq %r9, REGISTER_SAVE_R9(%rsp)
	VMOV %VEC(0), (REGISTER_SAVE_VEC_OFF)(%rsp)
	VMOV %VEC(1), (REGISTER_SAVE_VEC_OFF + VEC_SIZE)(%rsp)
	VMOV %VEC(2), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 2)(%rsp)
	VMOV %VEC(3), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 3)(%rsp)
	VMOV %VEC(4), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 4)(%rsp)
	VMOV %VEC(5), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 5)(%rsp)
	VMOV %VEC(6), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 6)(%rsp)
	VMOV %VEC(7), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 7)(%rsp)
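	# The registers saved above are exactly those the psABI uses to
	# pass arguments (%rdi, %rsi, %rdx, %rcx, %r8, %r9 and the
	# vector registers) plus %rax, which carries the number of
	# vector arguments for variadic calls; _dl_fixup may clobber
	# any of them.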
#ifndef __ILP32__
	# We also have to preserve bound registers.  These are nops if
	# Intel MPX isn't available or disabled.
# ifdef HAVE_MPX_SUPPORT
	bndmov %bnd0, REGISTER_SAVE_BND0(%rsp)
	bndmov %bnd1, REGISTER_SAVE_BND1(%rsp)
	bndmov %bnd2, REGISTER_SAVE_BND2(%rsp)
	bndmov %bnd3, REGISTER_SAVE_BND3(%rsp)
# else
	# Encode bndmov directly for assemblers without MPX support.
#  if REGISTER_SAVE_BND0 == 0
	.byte 0x66,0x0f,0x1b,0x04,0x24
#  else
	.byte 0x66,0x0f,0x1b,0x44,0x24,REGISTER_SAVE_BND0
#  endif
	.byte 0x66,0x0f,0x1b,0x4c,0x24,REGISTER_SAVE_BND1
	.byte 0x66,0x0f,0x1b,0x54,0x24,REGISTER_SAVE_BND2
	.byte 0x66,0x0f,0x1b,0x5c,0x24,REGISTER_SAVE_BND3
# endif
#endif
	# Copy args pushed by PLT in register.
	# %rdi: link_map, %rsi: reloc_index
	mov (LOCAL_STORAGE_AREA + 8)(%BASE), %RSI_LP
	mov LOCAL_STORAGE_AREA(%BASE), %RDI_LP
	call _dl_fixup		# Call resolver.
	mov %RAX_LP, %R11_LP	# Save return value.
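	# The resolved address is kept in %r11: per the psABI it is
	# neither an argument register nor callee-saved, so jumping
	# through it does not disturb the restored registers.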
#ifndef __ILP32__
	# Restore bound registers.  These are nops if Intel MPX isn't
	# available or disabled.
# ifdef HAVE_MPX_SUPPORT
	bndmov REGISTER_SAVE_BND3(%rsp), %bnd3
	bndmov REGISTER_SAVE_BND2(%rsp), %bnd2
	bndmov REGISTER_SAVE_BND1(%rsp), %bnd1
	bndmov REGISTER_SAVE_BND0(%rsp), %bnd0
# else
	.byte 0x66,0x0f,0x1a,0x5c,0x24,REGISTER_SAVE_BND3
	.byte 0x66,0x0f,0x1a,0x54,0x24,REGISTER_SAVE_BND2
	.byte 0x66,0x0f,0x1a,0x4c,0x24,REGISTER_SAVE_BND1
#  if REGISTER_SAVE_BND0 == 0
	.byte 0x66,0x0f,0x1a,0x04,0x24
#  else
	.byte 0x66,0x0f,0x1a,0x44,0x24,REGISTER_SAVE_BND0
#  endif
# endif
#endif
	# Get register content back.
	movq REGISTER_SAVE_R9(%rsp), %r9
	movq REGISTER_SAVE_R8(%rsp), %r8
	movq REGISTER_SAVE_RDI(%rsp), %rdi
	movq REGISTER_SAVE_RSI(%rsp), %rsi
	movq REGISTER_SAVE_RDX(%rsp), %rdx
	movq REGISTER_SAVE_RCX(%rsp), %rcx
	movq REGISTER_SAVE_RAX(%rsp), %rax
	VMOV (REGISTER_SAVE_VEC_OFF)(%rsp), %VEC(0)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE)(%rsp), %VEC(1)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 2)(%rsp), %VEC(2)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 3)(%rsp), %VEC(3)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 4)(%rsp), %VEC(4)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 5)(%rsp), %VEC(5)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 6)(%rsp), %VEC(6)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 7)(%rsp), %VEC(7)
#if DL_RUNIME_RESOLVE_REALIGN_STACK
	mov %RBX_LP, %RSP_LP
	cfi_def_cfa_register(%rsp)
	movq (%rsp), %rbx
	cfi_restore(%rbx)
#endif
	# Adjust stack (PLT did 2 pushes).
	add $(LOCAL_STORAGE_AREA + 16), %RSP_LP
	cfi_adjust_cfa_offset(-(LOCAL_STORAGE_AREA + 16))
	# Preserve bound registers.
	PRESERVE_BND_REGS_PREFIX
	jmp *%r11		# Jump to function address.
	cfi_endproc
	.size _dl_runtime_resolve, .-_dl_runtime_resolve
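
/* The _dl_runtime_profile trampoline is used instead of
   _dl_runtime_resolve when auditing or profiling (LD_AUDIT,
   LD_PROFILE) is enabled.  Besides resolving the symbol it saves all
   argument-passing registers in a La_x86_64_regs structure for the
   la_pltenter hooks and, if an audit module asks for it, calls
   _dl_call_pltexit after the bound function returns.  */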
# if (LR_VECTOR_OFFSET % VEC_SIZE) != 0
#  error LR_VECTOR_OFFSET must be a multiple of VEC_SIZE
# endif

	.globl _dl_runtime_profile
	.hidden _dl_runtime_profile
	.type _dl_runtime_profile, @function
	.align 16
_dl_runtime_profile:
	cfi_startproc
	cfi_adjust_cfa_offset(16)	# Incorporate PLT
	/* The La_x86_64_regs data structure pointed to by the
	   fourth parameter must be VEC_SIZE-byte aligned.  This must
	   be explicitly enforced.  We have to set up a dynamically
	   sized stack frame.  %rbx points to the top half which
	   has a fixed size and preserves the original stack pointer.  */

	sub $32, %RSP_LP	# Allocate the local storage.
	cfi_adjust_cfa_offset(32)
	movq %rbx, (%rsp)
	cfi_rel_offset(%rbx, 0)

	/* On the stack:
		56(%rbx)	parameter #1
		48(%rbx)	return address

		40(%rbx)	reloc index
		32(%rbx)	link_map

		24(%rbx)	La_x86_64_regs pointer
		16(%rbx)	framesize
		 8(%rbx)	rax
		  (%rbx)	rbx
	*/

	movq %rax, 8(%rsp)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)

	/* Actively align the La_x86_64_regs structure.  */
	and $-VEC_SIZE, %RSP_LP
# if defined HAVE_AVX_SUPPORT || defined HAVE_AVX512_ASM_SUPPORT
	/* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
	   to detect if any xmm0-xmm7 registers are changed by audit
	   module.  */
	sub $(LR_SIZE + XMM_SIZE*8), %RSP_LP
# else
	sub $LR_SIZE, %RSP_LP	# sizeof(La_x86_64_regs)
# endif
	mov %RSP_LP, 24(%rbx)
	/* Fill the La_x86_64_regs structure.  */
	movq %rdx, LR_RDX_OFFSET(%rsp)
	movq %r8, LR_R8_OFFSET(%rsp)
	movq %r9, LR_R9_OFFSET(%rsp)
	movq %rcx, LR_RCX_OFFSET(%rsp)
	movq %rsi, LR_RSI_OFFSET(%rsp)
	movq %rdi, LR_RDI_OFFSET(%rsp)
	movq %rbp, LR_RBP_OFFSET(%rsp)

	lea 48(%rbx), %RAX_LP
	movq %rax, LR_RSP_OFFSET(%rsp)

	/* We always store the XMM registers even if AVX is available.
	   This is to provide backward binary compatibility for existing
	   audit modules.  */
	movaps %xmm0, (LR_XMM_OFFSET)(%rsp)
	movaps %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
	movaps %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
	movaps %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
	movaps %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
	movaps %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
	movaps %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
	movaps %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

#ifndef __ILP32__
# ifdef HAVE_MPX_SUPPORT
	bndmov %bnd0, (LR_BND_OFFSET)(%rsp)		# Preserve bound
	bndmov %bnd1, (LR_BND_OFFSET + BND_SIZE)(%rsp)	# registers.  Nops if
	bndmov %bnd2, (LR_BND_OFFSET + BND_SIZE*2)(%rsp) # MPX not available
	bndmov %bnd3, (LR_BND_OFFSET + BND_SIZE*3)(%rsp) # or disabled.
# else
	.byte 0x66,0x0f,0x1b,0x84,0x24;.long (LR_BND_OFFSET)
	.byte 0x66,0x0f,0x1b,0x8c,0x24;.long (LR_BND_OFFSET + BND_SIZE)
	.byte 0x66,0x0f,0x1b,0x94,0x24;.long (LR_BND_OFFSET + BND_SIZE*2)
	.byte 0x66,0x0f,0x1b,0x9c,0x24;.long (LR_BND_OFFSET + BND_SIZE*3)
# endif
#endif

# if defined HAVE_AVX_SUPPORT || defined HAVE_AVX512_ASM_SUPPORT
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), (LR_VECTOR_OFFSET)(%rsp)
	VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	VMOVA %VEC(6), (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	VMOVA %VEC(7), (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)

	/* Save xmm0-xmm7 registers to detect if any of them are
	   changed by audit module.  */
	vmovdqa %xmm0, (LR_SIZE)(%rsp)
	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
# endif

	mov %RSP_LP, %RCX_LP	# La_x86_64_regs pointer to %rcx.
	mov 48(%rbx), %RDX_LP	# Load return address if needed.
	mov 40(%rbx), %RSI_LP	# Copy args pushed by PLT in register.
	mov 32(%rbx), %RDI_LP	# %rdi: link_map, %rsi: reloc_index
	lea 16(%rbx), %R8_LP	# Address of framesize
	call _dl_profile_fixup	# Call resolver.

	mov %RAX_LP, %R11_LP	# Save return value.
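	# _dl_profile_fixup returned the frame size requested by the
	# audit modules through the pointer passed in %r8.  A negative
	# framesize means la_pltexit does not need to be called, which
	# the jns below relies on.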

	movq 8(%rbx), %rax	# Get back register content.
	movq LR_RDX_OFFSET(%rsp), %rdx
	movq LR_R8_OFFSET(%rsp), %r8
	movq LR_R9_OFFSET(%rsp), %r9

	movaps (LR_XMM_OFFSET)(%rsp), %xmm0
	movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7

# if defined HAVE_AVX_SUPPORT || defined HAVE_AVX512_ASM_SUPPORT
	/* Check if any xmm0-xmm7 registers are changed by audit
	   module.  If an xmm register is unchanged, reload the full
	   vector register from the saved copy; otherwise keep the
	   value the audit module put in the xmm register.  */
	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
	vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %VEC(1)
	vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %VEC(2)
	vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %VEC(3)
	vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %VEC(4)
	vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %VEC(5)
	vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %VEC(6)
	vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)

1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
	vpmovmskb %xmm8, %esi
	cmp $0xffff, %esi
	je 2f
	vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %VEC(7)
	vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

1:
# endif

#ifndef __ILP32__
# ifdef HAVE_MPX_SUPPORT
	bndmov (LR_BND_OFFSET)(%rsp), %bnd0		# Restore bound
	bndmov (LR_BND_OFFSET + BND_SIZE)(%rsp), %bnd1	# registers.
	bndmov (LR_BND_OFFSET + BND_SIZE*2)(%rsp), %bnd2
	bndmov (LR_BND_OFFSET + BND_SIZE*3)(%rsp), %bnd3
# else
	.byte 0x66,0x0f,0x1a,0x84,0x24;.long (LR_BND_OFFSET)
	.byte 0x66,0x0f,0x1a,0x8c,0x24;.long (LR_BND_OFFSET + BND_SIZE)
	.byte 0x66,0x0f,0x1a,0x94,0x24;.long (LR_BND_OFFSET + BND_SIZE*2)
	.byte 0x66,0x0f,0x1a,0x9c,0x24;.long (LR_BND_OFFSET + BND_SIZE*3)
# endif
#endif

	mov 16(%rbx), %R10_LP	# Anything in framesize?
	test %R10_LP, %R10_LP
	PRESERVE_BND_REGS_PREFIX
	jns 3f

	/* There's nothing in the frame size, so there
	   will be no call to _dl_call_pltexit.  */

	/* Get back registers content.  */
	movq LR_RCX_OFFSET(%rsp), %rcx
	movq LR_RSI_OFFSET(%rsp), %rsi
	movq LR_RDI_OFFSET(%rsp), %rdi

	mov %RBX_LP, %RSP_LP
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map).
	cfi_adjust_cfa_offset(-48)
	PRESERVE_BND_REGS_PREFIX
	jmp *%r11		# Jump to function address.

3:
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)

	/* At this point we need to prepare new stack for the function
	   which has to be called.  We copy the original stack to a
	   temporary buffer of the size specified by the 'framesize'
	   returned from _dl_profile_fixup.  */

	lea LR_RSP_OFFSET(%rbx), %RSI_LP	# stack
	add $8, %R10_LP
	and $-16, %R10_LP
	mov %R10_LP, %RCX_LP
	sub %R10_LP, %RSP_LP
	mov %RSP_LP, %RDI_LP
	shr $3, %RCX_LP
	rep
	movsq

	/* rep movsq advanced %rdi by the copy size, so it again points
	   at the La_x86_64_regs structure (the old %rsp).  */
	movq 24(%rdi), %rcx	# Get back register content.
	movq 32(%rdi), %rsi
	movq 40(%rdi), %rdi

	PRESERVE_BND_REGS_PREFIX
	call *%r11

	mov 24(%rbx), %RSP_LP	# Drop the copied stack content.

	/* Now we have to prepare the La_x86_64_retval structure for
	   _dl_call_pltexit.  %rsp now points to the La_x86_64_regs
	   structure, so we just need to allocate sizeof(La_x86_64_retval)
	   space on the stack, since the alignment has already been taken
	   care of.  */
# if defined HAVE_AVX_SUPPORT || defined HAVE_AVX512_ASM_SUPPORT
	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
	   registers to detect if xmm0/xmm1 registers are changed
	   by audit module.  */
	sub $(LRV_SIZE + XMM_SIZE*2), %RSP_LP
# else
	sub $LRV_SIZE, %RSP_LP	# sizeof(La_x86_64_retval)
# endif
	mov %RSP_LP, %RCX_LP	# La_x86_64_retval argument to %rcx.
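	/* La_x86_64_retval collects everything a function can return
	   in registers: %rax/%rdx, %xmm0/%xmm1 (and the corresponding
	   full vector registers), %st(0)/%st(1) and, with MPX, the
	   %bnd0/%bnd1 bound registers.  */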

	/* Fill in the La_x86_64_retval structure.  */
	movq %rax, LRV_RAX_OFFSET(%rcx)
	movq %rdx, LRV_RDX_OFFSET(%rcx)

	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)

# if defined HAVE_AVX_SUPPORT || defined HAVE_AVX512_ASM_SUPPORT
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), LRV_VECTOR0_OFFSET(%rcx)
	VMOVA %VEC(1), LRV_VECTOR1_OFFSET(%rcx)

	/* Save xmm0/xmm1 registers to detect if they are changed
	   by audit module.  */
	vmovdqa %xmm0, (LRV_SIZE)(%rcx)
	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
# endif

#ifndef __ILP32__
# ifdef HAVE_MPX_SUPPORT
	bndmov %bnd0, LRV_BND0_OFFSET(%rcx)	# Preserve returned bounds.
	bndmov %bnd1, LRV_BND1_OFFSET(%rcx)
# else
	.byte 0x66,0x0f,0x1b,0x81;.long (LRV_BND0_OFFSET)
	.byte 0x66,0x0f,0x1b,0x89;.long (LRV_BND1_OFFSET)
# endif
#endif

	fstpt LRV_ST0_OFFSET(%rcx)
	fstpt LRV_ST1_OFFSET(%rcx)

	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	call _dl_call_pltexit
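	# _dl_call_pltexit invokes the audit modules' la_pltexit hooks,
	# which may modify the return value through the La_x86_64_retval
	# structure, so the return registers are reloaded from it below.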

	/* Restore return registers.  */
	movq LRV_RAX_OFFSET(%rsp), %rax
	movq LRV_RDX_OFFSET(%rsp), %rdx

	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
	movaps LRV_XMM1_OFFSET(%rsp), %xmm1

# if defined HAVE_AVX_SUPPORT || defined HAVE_AVX512_ASM_SUPPORT
	/* Check if xmm0/xmm1 registers are changed by audit module.  */
	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
	vpmovmskb %xmm2, %esi
	cmp $0xffff, %esi
	jne 1f
	VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)

1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
	vpmovmskb %xmm2, %esi
	cmp $0xffff, %esi
	jne 1f
	VMOVA LRV_VECTOR1_OFFSET(%rsp), %VEC(1)

1:
# endif

#ifndef __ILP32__
# ifdef HAVE_MPX_SUPPORT
	bndmov LRV_BND0_OFFSET(%rsp), %bnd0	# Restore bound registers.
	bndmov LRV_BND1_OFFSET(%rsp), %bnd1
# else
	.byte 0x66,0x0f,0x1a,0x84,0x24;.long (LRV_BND0_OFFSET)
	.byte 0x66,0x0f,0x1a,0x8c,0x24;.long (LRV_BND1_OFFSET)
# endif
#endif

	fldt LRV_ST1_OFFSET(%rsp)
	fldt LRV_ST0_OFFSET(%rsp)

	mov %RBX_LP, %RSP_LP
	movq (%rsp), %rbx
	cfi_restore(%rbx)
	cfi_def_cfa_register(%rsp)

	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map).
	cfi_adjust_cfa_offset(-48)
	PRESERVE_BND_REGS_PREFIX
	retq

	cfi_endproc
	.size _dl_runtime_profile, .-_dl_runtime_profile