1 /* Verify that changing AVX registers in audit library won't affect
2 function parameter passing/return. */
#include <dlfcn.h>
#include <link.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/wordsize.h>
#include <gnu/lib-names.h>
/* Audit interface handshake: report the interface version offered by
   the dynamic linker and accept it by returning it unchanged.  The
   sprintf into a local buffer exercises stack/register use inside the
   audit module without affecting the result.  */
unsigned int
la_version (unsigned int v)
{
  printf ("version: %u\n", v);

  char buf[20];
  sprintf (buf, "%u", v);

  return v;
}
29 la_activity (uintptr_t *cookie
, unsigned int flag
)
31 if (flag
== LA_ACT_CONSISTENT
)
32 printf ("activity: consistent\n");
33 else if (flag
== LA_ACT_ADD
)
34 printf ("activity: add\n");
35 else if (flag
== LA_ACT_DELETE
)
36 printf ("activity: delete\n");
38 printf ("activity: unknown activity %u\n", flag
);
42 la_objsearch (const char *name
, uintptr_t *cookie
, unsigned int flag
)
46 if (flag
== LA_SER_ORIG
)
47 flagstr
= "LA_SET_ORIG";
48 else if (flag
== LA_SER_LIBPATH
)
49 flagstr
= "LA_SER_LIBPATH";
50 else if (flag
== LA_SER_RUNPATH
)
51 flagstr
= "LA_SER_RUNPATH";
52 else if (flag
== LA_SER_CONFIG
)
53 flagstr
= "LA_SER_CONFIG";
54 else if (flag
== LA_SER_DEFAULT
)
55 flagstr
= "LA_SER_DEFAULT";
56 else if (flag
== LA_SER_SECURE
)
57 flagstr
= "LA_SER_SECURE";
60 sprintf (buf
, "unknown flag %d", flag
);
63 printf ("objsearch: %s, %s\n", name
, flagstr
);
69 la_objopen (struct link_map
*l
, Lmid_t lmid
, uintptr_t *cookie
)
71 printf ("objopen: %ld, %s\n", lmid
, l
->l_name
);
/* Called once before the main program's entry point runs.
   NOTE(review): body was lost in extraction; reconstructed as the
   plain log line used by the sibling hooks — confirm against the
   test's expected output.  */
void
la_preinit (uintptr_t *cookie)
{
  printf ("preinit\n");
}
/* Log object unload; the return value is ignored by ld.so.  */
unsigned int
la_objclose (uintptr_t *cookie)
{
  printf ("objclose\n");

  return 0;
}
/* 32-bit symbol-binding hook: log the binding and return the symbol's
   value unchanged so relocation proceeds normally.  */
uintptr_t
la_symbind32 (Elf32_Sym *sym, unsigned int ndx, uintptr_t *refcook,
	      uintptr_t *defcook, unsigned int *flags, const char *symname)
{
  printf ("symbind32: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
	  symname, (long int) sym->st_value, ndx, *flags);

  return sym->st_value;
}
/* 64-bit symbol-binding hook: log the binding and return the symbol's
   value unchanged so relocation proceeds normally.  */
uintptr_t
la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
	      uintptr_t *defcook, unsigned int *flags, const char *symname)
{
  printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
	  symname, (long int) sym->st_value, ndx, *flags);

  return sym->st_value;
}
109 #include <tst-audit.h>
#ifdef __AVX__
#include <immintrin.h>
#include <cpuid.h>

/* Cached CPUID result: -1 = not probed yet, 0 = no AVX, 1 = AVX.  */
static int avx = -1;

/* Return nonzero iff the CPU supports AVX.  Probes CPUID leaf 1 once
   and caches the answer; always_inline keeps the hot-path check cheap
   inside pltenter/pltexit.  */
static int
__attribute ((always_inline))
check_avx (void)
{
  if (avx == -1)
    {
      unsigned int eax, ebx, ecx, edx;

      if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
	  && (ecx & bit_AVX))
	avx = 1;
      else
	avx = 0;
    }

  return avx;
}
#else
/* Without AVX at compile time only SSE2 intrinsics are used.  */
#include <emmintrin.h>
#endif
138 pltenter (ElfW(Sym
) *sym
, unsigned int ndx
, uintptr_t *refcook
,
139 uintptr_t *defcook
, La_regs
*regs
, unsigned int *flags
,
140 const char *symname
, long int *framesizep
)
142 printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
143 symname
, (long int) sym
->st_value
, ndx
, *flags
);
146 if (check_avx () && strcmp (symname
, "audit_test") == 0)
150 __m128i xmm
= _mm_setzero_si128 ();
151 for (i
= 0; i
< 8; i
++)
152 if (memcmp (®s
->lr_xmm
[i
], &xmm
, sizeof (xmm
))
153 || memcmp (®s
->lr_vector
[i
], &xmm
, sizeof (xmm
)))
156 for (i
= 0; i
< 8; i
+= 2)
158 regs
->lr_xmm
[i
] = (La_x86_64_xmm
) _mm_set1_epi32 (i
+ 0x100);
159 regs
->lr_vector
[i
+ 1].ymm
[0]
160 = (La_x86_64_ymm
) _mm256_set1_epi32 (i
+ 0x101);
163 __m256i ymm
= _mm256_set1_epi32 (-1);
164 asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm
) : "xmm0" );
165 asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm
) : "xmm1" );
166 asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm
) : "xmm2" );
167 asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm
) : "xmm3" );
168 asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm
) : "xmm4" );
169 asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm
) : "xmm5" );
170 asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm
) : "xmm6" );
171 asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm
) : "xmm7" );
177 return sym
->st_value
;
181 pltexit (ElfW(Sym
) *sym
, unsigned int ndx
, uintptr_t *refcook
,
182 uintptr_t *defcook
, const La_regs
*inregs
, La_retval
*outregs
,
185 printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
186 symname
, (long int) sym
->st_value
, ndx
,
187 (ptrdiff_t) outregs
->int_retval
);
190 if (check_avx () && strcmp (symname
, "audit_test") == 0)
194 __m128i xmm
= _mm_setzero_si128 ();
195 if (memcmp (&outregs
->lrv_xmm0
, &xmm
, sizeof (xmm
))
196 || memcmp (&outregs
->lrv_vector0
, &xmm
, sizeof (xmm
)))
201 for (i
= 0; i
< 8; i
+= 2)
203 xmm
= _mm_set1_epi32 (i
+ 0x100);
204 if (memcmp (&inregs
->lr_xmm
[i
], &xmm
, sizeof (xmm
))
205 || memcmp (&inregs
->lr_vector
[i
], &xmm
, sizeof (xmm
)))
208 ymm
= _mm256_set1_epi32 (i
+ 0x101);
209 if (memcmp (&inregs
->lr_xmm
[i
+ 1],
210 &inregs
->lr_vector
[i
+ 1].xmm
[0], sizeof (xmm
))
211 || memcmp (&inregs
->lr_vector
[i
+ 1], &ymm
, sizeof (ymm
)))
215 outregs
->lrv_vector0
.ymm
[0]
216 = (La_x86_64_ymm
) _mm256_set1_epi32 (0x98abcdef);
218 ymm
= _mm256_set1_epi32 (-1);
219 asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm
) : "xmm0" );
220 asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm
) : "xmm1" );