/* PLT trampolines.  x86-64 version.
   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#undef REGISTER_SAVE_AREA_RAW
#ifdef __ILP32__
/* X32 saves RCX, RDX, RSI, RDI, R8 and R9 plus RAX as well as VEC0 to
   VEC7.  */
# define REGISTER_SAVE_AREA_RAW	(8 * 7 + VEC_SIZE * 8)
#else
/* X86-64 saves RCX, RDX, RSI, RDI, R8 and R9 plus RAX as well as
   BND0, BND1, BND2, BND3 and VEC0 to VEC7.  */
# define REGISTER_SAVE_AREA_RAW	(8 * 7 + 16 * 4 + VEC_SIZE * 8)
#endif
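/* For example, with 64-byte vectors the x86-64 variant works out to
   8 * 7 + 16 * 4 + 64 * 8 = 632 bytes; the exact size depends on the
   VEC_SIZE of the trampoline variant being assembled.  */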
#undef REGISTER_SAVE_AREA
#undef LOCAL_STORAGE_AREA
#undef BASE
#if DL_RUNTIME_RESOLVE_REALIGN_STACK
# define REGISTER_SAVE_AREA	(REGISTER_SAVE_AREA_RAW + 8)
/* Local stack area before jumping to function address: RBX.  */
# define LOCAL_STORAGE_AREA	8
# define BASE			rbx
# if (REGISTER_SAVE_AREA % VEC_SIZE) != 0
#  error REGISTER_SAVE_AREA must be a multiple of VEC_SIZE
# endif
#else
# define REGISTER_SAVE_AREA	REGISTER_SAVE_AREA_RAW
/* Local stack area before jumping to function address: All saved
   registers.  */
# define LOCAL_STORAGE_AREA	REGISTER_SAVE_AREA
# define BASE			rsp
# if (REGISTER_SAVE_AREA % 16) != 8
#  error REGISTER_SAVE_AREA must be an odd multiple of 8
# endif
#endif
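/* The odd-multiple-of-8 requirement keeps the unaligned entry path
   16-byte aligned: on entry %rsp is 8 modulo 16 (return address plus
   two PLT pushes), so subtracting an odd multiple of 8 gives a
   16-byte aligned save area for the aligned vector moves below.  */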
#ifdef _dl_runtime_resolve_opt
/* Use the smallest vector registers to preserve the full YMM/ZMM
   registers to avoid SSE transition penalty.  */
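/* On processors that track SSE/AVX state transitions, saving and
   restoring with full-width YMM/ZMM moves can mark the upper vector
   state dirty even when the application only used XMM registers, so
   these variants first check which vector state is actually live.  */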
/* Check if the upper 128 bits in %ymm0 - %ymm7 registers are non-zero
   and preserve %xmm0 - %xmm7 registers with the zero upper bits.  Since
   there is no SSE transition penalty on AVX512 processors which don't
   support XGETBV with ECX == 1, _dl_runtime_resolve_avx512_slow isn't
   provided.  */
64 .hidden _dl_runtime_resolve_avx_slow
65 .type _dl_runtime_resolve_avx_slow
, @function
67 _dl_runtime_resolve_avx_slow
:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
	vorpd %ymm0, %ymm1, %ymm8
	vorpd %ymm2, %ymm3, %ymm9
	vorpd %ymm4, %ymm5, %ymm10
	vorpd %ymm6, %ymm7, %ymm11
	vorpd %ymm8, %ymm9, %ymm9
	vorpd %ymm10, %ymm11, %ymm10
	vpcmpeqd %xmm8, %xmm8, %xmm8
	vorpd %ymm9, %ymm10, %ymm10
	vptest %ymm10, %ymm8
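	# %ymm10 now holds the bitwise OR of %ymm0 - %ymm7, and %ymm8 has
	# all ones only in its lower 128 bits, so the vptest above sets CF
	# exactly when the upper 128 bits of all eight registers are zero.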
	# Preserve %ymm0 - %ymm7 registers if the upper 128 bits of any
	# %ymm0 - %ymm7 registers aren't zero.
	PRESERVE_BND_REGS_PREFIX
	jnc _dl_runtime_resolve_avx
	# Use vzeroupper to avoid SSE transition penalty.
	vzeroupper
	# Preserve %xmm0 - %xmm7 registers with the zero upper 128 bits
	# when the upper 128 bits of %ymm0 - %ymm7 registers are zero.
	PRESERVE_BND_REGS_PREFIX
	jmp _dl_runtime_resolve_sse_vex
	cfi_adjust_cfa_offset(-16) # Restore PLT adjustment
	.size _dl_runtime_resolve_avx_slow, .-_dl_runtime_resolve_avx_slow
/* Use XGETBV with ECX == 1 to check which bits in vector registers are
   non-zero and only preserve the non-zero lower bits with zero upper
   bits.  */
	.globl _dl_runtime_resolve_opt
	.hidden _dl_runtime_resolve_opt
	.type _dl_runtime_resolve_opt, @function
_dl_runtime_resolve_opt:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
	pushq %rax
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rax, 0)
	pushq %rcx
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rcx, 0)
	pushq %rdx
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rdx, 0)
	movl $1, %ecx
	xgetbv
	movl %eax, %r11d
	popq %rdx
	cfi_adjust_cfa_offset(-8)
	cfi_restore (%rdx)
	popq %rcx
	cfi_adjust_cfa_offset(-8)
	cfi_restore (%rcx)
	popq %rax
	cfi_adjust_cfa_offset(-8)
	cfi_restore (%rax)
# if VEC_SIZE == 32
	# For YMM registers, check if YMM state is in use.
	andl $bit_YMM_state, %r11d
	# Preserve %xmm0 - %xmm7 registers with the zero upper 128 bits if
	# YMM state isn't in use.
	PRESERVE_BND_REGS_PREFIX
	jz _dl_runtime_resolve_sse_vex
# elif VEC_SIZE == 16
	# For ZMM registers, check if YMM state and ZMM state are in
	# use.
	andl $(bit_YMM_state | bit_ZMM0_15_state), %r11d
	cmpl $bit_YMM_state, %r11d
	# Preserve %zmm0 - %zmm7 registers if ZMM state is in use.
	PRESERVE_BND_REGS_PREFIX
	jg _dl_runtime_resolve_avx512
	# Preserve %ymm0 - %ymm7 registers with the zero upper 256 bits if
	# ZMM state isn't in use.
	PRESERVE_BND_REGS_PREFIX
	je _dl_runtime_resolve_avx
	# Preserve %xmm0 - %xmm7 registers with the zero upper 384 bits if
	# neither YMM state nor ZMM state are in use.
# else
#  error Unsupported VEC_SIZE!
# endif
	cfi_adjust_cfa_offset(-16) # Restore PLT adjustment
	.size _dl_runtime_resolve_opt, .-_dl_runtime_resolve_opt
#endif
	.globl _dl_runtime_resolve
	.hidden _dl_runtime_resolve
	.type _dl_runtime_resolve, @function
_dl_runtime_resolve:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
#if DL_RUNTIME_RESOLVE_REALIGN_STACK
# if LOCAL_STORAGE_AREA != 8
#  error LOCAL_STORAGE_AREA must be 8
# endif
	pushq %rbx			# push subtracts 8 from %rsp.
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%rbx, 0)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)
	and $-VEC_SIZE, %RSP_LP
#endif
	sub $REGISTER_SAVE_AREA, %RSP_LP
#if !DL_RUNTIME_RESOLVE_REALIGN_STACK
	cfi_adjust_cfa_offset(REGISTER_SAVE_AREA)
#endif
	# Preserve registers otherwise clobbered.
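	# %rdi, %rsi, %rdx, %rcx, %r8 and %r9 carry the integer arguments,
	# %rax the variadic vector-register count and %VEC(0) - %VEC(7) the
	# vector arguments of the call being resolved; _dl_fixup may
	# clobber them, so they are saved here and restored afterwards.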
	movq %rax, REGISTER_SAVE_RAX(%rsp)
	movq %rcx, REGISTER_SAVE_RCX(%rsp)
	movq %rdx, REGISTER_SAVE_RDX(%rsp)
	movq %rsi, REGISTER_SAVE_RSI(%rsp)
	movq %rdi, REGISTER_SAVE_RDI(%rsp)
	movq %r8, REGISTER_SAVE_R8(%rsp)
	movq %r9, REGISTER_SAVE_R9(%rsp)
	VMOV %VEC(0), (REGISTER_SAVE_VEC_OFF)(%rsp)
	VMOV %VEC(1), (REGISTER_SAVE_VEC_OFF + VEC_SIZE)(%rsp)
	VMOV %VEC(2), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 2)(%rsp)
	VMOV %VEC(3), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 3)(%rsp)
	VMOV %VEC(4), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 4)(%rsp)
	VMOV %VEC(5), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 5)(%rsp)
	VMOV %VEC(6), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 6)(%rsp)
	VMOV %VEC(7), (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 7)(%rsp)
	# We also have to preserve bound registers.  These are nops if
	# Intel MPX isn't available or disabled.
# ifdef HAVE_MPX_SUPPORT
	bndmov %bnd0, REGISTER_SAVE_BND0(%rsp)
	bndmov %bnd1, REGISTER_SAVE_BND1(%rsp)
	bndmov %bnd2, REGISTER_SAVE_BND2(%rsp)
	bndmov %bnd3, REGISTER_SAVE_BND3(%rsp)
# else
#  if REGISTER_SAVE_BND0 == 0
	.byte 0x66,0x0f,0x1b,0x04,0x24
#  else
	.byte 0x66,0x0f,0x1b,0x44,0x24,REGISTER_SAVE_BND0
#  endif
	.byte 0x66,0x0f,0x1b,0x4c,0x24,REGISTER_SAVE_BND1
	.byte 0x66,0x0f,0x1b,0x54,0x24,REGISTER_SAVE_BND2
	.byte 0x66,0x0f,0x1b,0x5c,0x24,REGISTER_SAVE_BND3
# endif
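	# The .byte sequences above hand-encode the same bndmov stores for
	# assemblers built without MPX support.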
	# Copy args pushed by PLT in register.
	# %rdi: link_map, %rsi: reloc_index
	mov (LOCAL_STORAGE_AREA + 8)(%BASE), %RSI_LP
	mov LOCAL_STORAGE_AREA(%BASE), %RDI_LP
	call _dl_fixup		# Call resolver.
	mov %RAX_LP, %R11_LP	# Save return value
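	# _dl_fixup resolves the symbol for this link_map/reloc_index pair,
	# updates the GOT entry and returns the target address in %rax; it
	# is kept in %r11 so the argument registers can be restored before
	# jumping there.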
	# Restore bound registers.  These are nops if Intel MPX isn't
	# available or disabled.
# ifdef HAVE_MPX_SUPPORT
	bndmov REGISTER_SAVE_BND3(%rsp), %bnd3
	bndmov REGISTER_SAVE_BND2(%rsp), %bnd2
	bndmov REGISTER_SAVE_BND1(%rsp), %bnd1
	bndmov REGISTER_SAVE_BND0(%rsp), %bnd0
# else
	.byte 0x66,0x0f,0x1a,0x5c,0x24,REGISTER_SAVE_BND3
	.byte 0x66,0x0f,0x1a,0x54,0x24,REGISTER_SAVE_BND2
	.byte 0x66,0x0f,0x1a,0x4c,0x24,REGISTER_SAVE_BND1
#  if REGISTER_SAVE_BND0 == 0
	.byte 0x66,0x0f,0x1a,0x04,0x24
#  else
	.byte 0x66,0x0f,0x1a,0x44,0x24,REGISTER_SAVE_BND0
#  endif
# endif
	# Get register content back.
	movq REGISTER_SAVE_R9(%rsp), %r9
	movq REGISTER_SAVE_R8(%rsp), %r8
	movq REGISTER_SAVE_RDI(%rsp), %rdi
	movq REGISTER_SAVE_RSI(%rsp), %rsi
	movq REGISTER_SAVE_RDX(%rsp), %rdx
	movq REGISTER_SAVE_RCX(%rsp), %rcx
	movq REGISTER_SAVE_RAX(%rsp), %rax
	VMOV (REGISTER_SAVE_VEC_OFF)(%rsp), %VEC(0)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE)(%rsp), %VEC(1)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 2)(%rsp), %VEC(2)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 3)(%rsp), %VEC(3)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 4)(%rsp), %VEC(4)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 5)(%rsp), %VEC(5)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 6)(%rsp), %VEC(6)
	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 7)(%rsp), %VEC(7)
#if DL_RUNTIME_RESOLVE_REALIGN_STACK
	mov %RBX_LP, %RSP_LP
	cfi_def_cfa_register(%rsp)
	movq (%rsp), %rbx
	cfi_restore(%rbx)
#endif
	# Adjust stack (PLT did 2 pushes).
	add $(LOCAL_STORAGE_AREA + 16), %RSP_LP
	cfi_adjust_cfa_offset(-(LOCAL_STORAGE_AREA + 16))
	# Preserve bound registers.
	PRESERVE_BND_REGS_PREFIX
	jmp *%r11		# Jump to function address.
	.size _dl_runtime_resolve, .-_dl_runtime_resolve
/* To preserve %xmm0 - %xmm7 registers, dl-trampoline.h is included
   twice, for _dl_runtime_resolve_sse and _dl_runtime_resolve_sse_vex.
   But we don't need another _dl_runtime_profile for XMM registers.  */
#if !defined PROF && defined _dl_runtime_profile
# if (LR_VECTOR_OFFSET % VEC_SIZE) != 0
#  error LR_VECTOR_OFFSET must be a multiple of VEC_SIZE
# endif

	.globl _dl_runtime_profile
	.hidden _dl_runtime_profile
	.type _dl_runtime_profile, @function
_dl_runtime_profile:
	cfi_adjust_cfa_offset(16) # Incorporate PLT
	/* The La_x86_64_regs data structure pointed to by the
	   fourth parameter must be VEC_SIZE-byte aligned.  This must
	   be explicitly enforced.  We have to set up a dynamically
	   sized stack frame.  %rbx points to the top half which
	   has a fixed size and preserves the original stack pointer.  */
	sub $32, %RSP_LP	# Allocate the local storage.
	cfi_adjust_cfa_offset(32)
	movq %rbx, (%rsp)
	cfi_rel_offset(%rbx, 0)

	/* On the stack:
		56(%rbx)	parameter #1
		48(%rbx)	return address

		40(%rbx)	reloc index
		32(%rbx)	link_map

		24(%rbx)	La_x86_64_regs pointer
		16(%rbx)	framesize
		8(%rbx)		rax
		(%rbx)		rbx
	*/

	movq %rax, 8(%rsp)
	mov %RSP_LP, %RBX_LP
	cfi_def_cfa_register(%rbx)
	/* Actively align the La_x86_64_regs structure.  */
	and $-VEC_SIZE, %RSP_LP
	/* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
	   to detect if any xmm0-xmm7 registers are changed by audit
	   module.  */
	sub $(LR_SIZE + XMM_SIZE*8), %RSP_LP
	/* Fill the La_x86_64_regs structure.  */
	movq %rdx, LR_RDX_OFFSET(%rsp)
	movq %r8,  LR_R8_OFFSET(%rsp)
	movq %r9,  LR_R9_OFFSET(%rsp)
	movq %rcx, LR_RCX_OFFSET(%rsp)
	movq %rsi, LR_RSI_OFFSET(%rsp)
	movq %rdi, LR_RDI_OFFSET(%rsp)
	movq %rbp, LR_RBP_OFFSET(%rsp)

	lea 48(%rbx), %RAX_LP
	movq %rax, LR_RSP_OFFSET(%rsp)
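	# 48(%rbx) is the slot holding the return address, i.e. the stack
	# pointer the resolved function will see on entry, so its address
	# is what gets recorded in the LR_RSP field for audit modules.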
	/* We always store the XMM registers even if AVX is available.
	   This is to provide backward binary compatibility for existing
	   audit modules.  */
	movaps %xmm0, (LR_XMM_OFFSET)(%rsp)
	movaps %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
	movaps %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
	movaps %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
	movaps %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
	movaps %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
	movaps %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
	movaps %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
# ifdef HAVE_MPX_SUPPORT
	bndmov %bnd0, (LR_BND_OFFSET)(%rsp)		 # Preserve bound
	bndmov %bnd1, (LR_BND_OFFSET + BND_SIZE)(%rsp)	 # registers. Nops if
	bndmov %bnd2, (LR_BND_OFFSET + BND_SIZE*2)(%rsp) # MPX not available
	bndmov %bnd3, (LR_BND_OFFSET + BND_SIZE*3)(%rsp) # or disabled.
# else
	.byte 0x66,0x0f,0x1b,0x84,0x24;.long (LR_BND_OFFSET)
	.byte 0x66,0x0f,0x1b,0x8c,0x24;.long (LR_BND_OFFSET + BND_SIZE)
	.byte 0x66,0x0f,0x1b,0x94,0x24;.long (LR_BND_OFFSET + BND_SIZE*2)
	.byte 0x66,0x0f,0x1b,0x9c,0x24;.long (LR_BND_OFFSET + BND_SIZE*3)
# endif
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), (LR_VECTOR_OFFSET)(%rsp)
	VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	VMOVA %VEC(6), (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	VMOVA %VEC(7), (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	/* Save xmm0-xmm7 registers to detect if any of them are
	   changed by audit module.  */
	vmovdqa %xmm0, (LR_SIZE)(%rsp)
	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
	mov %RSP_LP, %RCX_LP	# La_x86_64_regs pointer to %rcx.
	mov 48(%rbx), %RDX_LP	# Load return address if needed.
	mov 40(%rbx), %RSI_LP	# Copy args pushed by PLT in register.
	mov 32(%rbx), %RDI_LP	# %rdi: link_map, %rsi: reloc_index
	lea 16(%rbx), %R8_LP	# Address of framesize
	call _dl_profile_fixup	# Call resolver.

	mov %RAX_LP, %R11_LP	# Save return value.
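	# If an audit module asks for la_pltexit to be called,
	# _dl_profile_fixup stores a non-negative frame size at 16(%rbx);
	# that slow path, which copies the caller's stack frame, is handled
	# after the framesize test below.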
	movq 8(%rbx), %rax	# Get back register content.
	movq LR_RDX_OFFSET(%rsp), %rdx
	movq LR_R8_OFFSET(%rsp), %r8
	movq LR_R9_OFFSET(%rsp), %r9
	movaps (LR_XMM_OFFSET)(%rsp), %xmm0
	movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
	/* Check if any xmm0-xmm7 registers are changed by audit
	   module.  */
	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
	vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)
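	# The same check is repeated for %xmm1 - %xmm7 below: if the audit
	# module left the register unchanged, the full-width %VEC value
	# saved above is reloaded; otherwise the modified %xmm value is
	# kept and copied into the saved vector slot.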
1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %VEC(1)
	vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %VEC(2)
	vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %VEC(3)
	vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %VEC(4)
	vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %VEC(5)
	vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %VEC(6)
	vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
	vpmovmskb %xmm8, %esi
	cmpl $0xffff, %esi
	je 2f
	vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
	jmp 1f
2:	VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %VEC(7)
	vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)

1:
# ifdef HAVE_MPX_SUPPORT
	bndmov (LR_BND_OFFSET)(%rsp), %bnd0		# Restore bound
	bndmov (LR_BND_OFFSET + BND_SIZE)(%rsp), %bnd1	# registers.
	bndmov (LR_BND_OFFSET + BND_SIZE*2)(%rsp), %bnd2
	bndmov (LR_BND_OFFSET + BND_SIZE*3)(%rsp), %bnd3
# else
	.byte 0x66,0x0f,0x1a,0x84,0x24;.long (LR_BND_OFFSET)
	.byte 0x66,0x0f,0x1a,0x8c,0x24;.long (LR_BND_OFFSET + BND_SIZE)
	.byte 0x66,0x0f,0x1a,0x94,0x24;.long (LR_BND_OFFSET + BND_SIZE*2)
	.byte 0x66,0x0f,0x1a,0x9c,0x24;.long (LR_BND_OFFSET + BND_SIZE*3)
# endif
	mov 16(%rbx), %R10_LP	# Anything in framesize?
	test %R10_LP, %R10_LP
	PRESERVE_BND_REGS_PREFIX
	jns 3f
	/* There's nothing in the frame size, so there
	   will be no call to _dl_call_pltexit.  */
	/* Get back register contents.  */
	movq LR_RCX_OFFSET(%rsp), %rcx
	movq LR_RSI_OFFSET(%rsp), %rsi
	movq LR_RDI_OFFSET(%rsp), %rdi

	mov %RBX_LP, %RSP_LP
	cfi_def_cfa_register(%rsp)
	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	PRESERVE_BND_REGS_PREFIX
	jmp *%r11		# Jump to function address.
3:
	cfi_adjust_cfa_offset(48)
	cfi_rel_offset(%rbx, 0)
	cfi_def_cfa_register(%rbx)
	/* At this point we need to prepare a new stack for the function
	   which has to be called.  We copy the original stack to a
	   temporary buffer of the size specified by the 'framesize'
	   returned from _dl_profile_fixup.  */

	lea LR_RSP_OFFSET(%rbx), %RSI_LP	# stack
	movq 24(%rdi), %rcx	# Get back register content.
	movq 32(%rdi), %rsi
	movq 40(%rdi), %rdi

	PRESERVE_BND_REGS_PREFIX
	call *%r11		# Call function.

	mov 24(%rbx), %RSP_LP	# Drop the copied stack content
	/* Now we have to prepare the La_x86_64_retval structure for
	   _dl_call_pltexit.  %rsp now points to the La_x86_64_regs
	   structure, so we just need to allocate sizeof(La_x86_64_retval)
	   bytes on the stack, since the alignment has already been taken
	   care of.  */
#ifdef RESTORE_AVX
	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
	   registers to detect if xmm0/xmm1 registers are changed
	   by audit module.  */
	sub $(LRV_SIZE + XMM_SIZE*2), %RSP_LP
#else
	sub $LRV_SIZE, %RSP_LP	# sizeof(La_x86_64_retval)
#endif
	mov %RSP_LP, %RCX_LP	# La_x86_64_retval argument to %rcx.

	/* Fill in the La_x86_64_retval structure.  */
	movq %rax, LRV_RAX_OFFSET(%rcx)
	movq %rdx, LRV_RDX_OFFSET(%rcx)

	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
	/* This is to support AVX audit modules.  */
	VMOVA %VEC(0), LRV_VECTOR0_OFFSET(%rcx)
	VMOVA %VEC(1), LRV_VECTOR1_OFFSET(%rcx)
	/* Save xmm0/xmm1 registers to detect if they are changed
	   by audit module.  */
	vmovdqa %xmm0, (LRV_SIZE)(%rcx)
	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
# ifdef HAVE_MPX_SUPPORT
	bndmov %bnd0, LRV_BND0_OFFSET(%rcx)	# Preserve returned bounds.
	bndmov %bnd1, LRV_BND1_OFFSET(%rcx)
# else
	.byte 0x66,0x0f,0x1b,0x81;.long (LRV_BND0_OFFSET)
	.byte 0x66,0x0f,0x1b,0x89;.long (LRV_BND1_OFFSET)
# endif
	fstpt LRV_ST0_OFFSET(%rcx)
	fstpt LRV_ST1_OFFSET(%rcx)
	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
	call _dl_call_pltexit
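	# _dl_call_pltexit runs the audit modules' la_pltexit hooks with
	# the saved register state and the return value structure, so they
	# can inspect or modify the function's return value.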
	/* Restore return registers.  */
	movq LRV_RAX_OFFSET(%rsp), %rax
	movq LRV_RDX_OFFSET(%rsp), %rdx

	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
	/* Check if xmm0/xmm1 registers are changed by audit module.  */
	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
	vpmovmskb %xmm2, %esi
	cmpl $0xffff, %esi
	jne 1f
	VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)

1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
	vpmovmskb %xmm2, %esi
	cmpl $0xffff, %esi
	jne 1f
	VMOVA LRV_VECTOR1_OFFSET(%rsp), %VEC(1)

1:
# ifdef HAVE_MPX_SUPPORT
	bndmov LRV_BND0_OFFSET(%rsp), %bnd0	# Restore bound registers.
	bndmov LRV_BND1_OFFSET(%rsp), %bnd1
# else
	.byte 0x66,0x0f,0x1a,0x84,0x24;.long (LRV_BND0_OFFSET)
	.byte 0x66,0x0f,0x1a,0x8c,0x24;.long (LRV_BND1_OFFSET)
# endif
	fldt LRV_ST1_OFFSET(%rsp)
	fldt LRV_ST0_OFFSET(%rsp)
	mov %RBX_LP, %RSP_LP
	cfi_def_cfa_register(%rsp)

	add $48, %RSP_LP	# Adjust the stack to the return value
				# (eats the reloc index and link_map)
	cfi_adjust_cfa_offset(-48)
	PRESERVE_BND_REGS_PREFIX
	retq
	.size _dl_runtime_profile, .-_dl_runtime_profile
#endif