/* Verify that changing AVX registers in audit library won't affect
   function parameter passing/return.  */
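
/* This module is meant to be loaded with LD_AUDIT.  The dynamic linker
   calls the la_* hooks below and, around each intercepted PLT call, the
   pltenter/pltexit hooks, in which the test deliberately overwrites the
   saved and live AVX register state.  */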
#include <dlfcn.h>
#include <link.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/wordsize.h>
#include <gnu/lib-names.h>

unsigned int
la_version (unsigned int v)
{
  setlinebuf (stdout);

  printf ("version: %u\n", v);

  char buf[20];
  sprintf (buf, "%u", v);

  return v;
}

void
la_activity (uintptr_t *cookie, unsigned int flag)
{
  if (flag == LA_ACT_CONSISTENT)
    printf ("activity: consistent\n");
  else if (flag == LA_ACT_ADD)
    printf ("activity: add\n");
  else if (flag == LA_ACT_DELETE)
    printf ("activity: delete\n");
  else
    printf ("activity: unknown activity %u\n", flag);
}

char *
la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
{
  char buf[100];
  const char *flagstr;
  if (flag == LA_SER_ORIG)
    flagstr = "LA_SER_ORIG";
  else if (flag == LA_SER_LIBPATH)
    flagstr = "LA_SER_LIBPATH";
  else if (flag == LA_SER_RUNPATH)
    flagstr = "LA_SER_RUNPATH";
  else if (flag == LA_SER_CONFIG)
    flagstr = "LA_SER_CONFIG";
  else if (flag == LA_SER_DEFAULT)
    flagstr = "LA_SER_DEFAULT";
  else if (flag == LA_SER_SECURE)
    flagstr = "LA_SER_SECURE";
  else
    {
      sprintf (buf, "unknown flag %d", flag);
      flagstr = buf;
    }
  printf ("objsearch: %s, %s\n", name, flagstr);

  return (char *) name;
}

unsigned int
la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
{
  printf ("objopen: %ld, %s\n", lmid, l->l_name);

  /* Audit bindings both to and from this object so that pltenter and
     pltexit are called for it.  */
  return LA_FLG_BINDTO | LA_FLG_BINDFROM;
}

void
la_preinit (uintptr_t *cookie)
{
  printf ("preinit\n");
}

unsigned int
la_objclose (uintptr_t *cookie)
{
  printf ("objclose\n");

  return 0;
}

uintptr_t
la_symbind32 (Elf32_Sym *sym, unsigned int ndx, uintptr_t *refcook,
              uintptr_t *defcook, unsigned int *flags, const char *symname)
{
  printf ("symbind32: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
          symname, (long int) sym->st_value, ndx, *flags);

  return sym->st_value;
}

uintptr_t
la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
              uintptr_t *defcook, unsigned int *flags, const char *symname)
{
  printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
          symname, (long int) sym->st_value, ndx, *flags);

  return sym->st_value;
}
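
/* tst-audit.h maps the generic pltenter/pltexit names used below to the
   machine-specific audit hooks (on x86-64, la_x86_64_gnu_pltenter and
   la_x86_64_gnu_pltexit) and provides the La_regs, La_retval and
   int_retval aliases.  */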
#include <tst-audit.h>

#ifdef __AVX__
#include <immintrin.h>
#include <cpuid.h>

static int avx = -1;

static int
__attribute ((always_inline))
check_avx (void)
{
  /* Query CPUID once and cache whether AVX is available.  */
  if (avx == -1)
    {
      unsigned int eax, ebx, ecx, edx;

      if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
          && (ecx & bit_AVX))
        avx = 1;
      else
        avx = 0;
    }

  return avx;
}
#else
#include <emmintrin.h>
#endif

ElfW(Addr)
pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
          uintptr_t *defcook, La_regs *regs, unsigned int *flags,
          const char *symname, long int *framesizep)
{
  printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
          symname, (long int) sym->st_value, ndx, *flags);

#ifdef __AVX__
  if (check_avx () && strcmp (symname, "audit_test") == 0)
    {
      int i;

      /* The saved incoming vector registers must all be zero.  */
      __m128i xmm = _mm_setzero_si128 ();
      for (i = 0; i < 8; i++)
        if (memcmp (&regs->lr_xmm[i], &xmm, sizeof (xmm))
            || memcmp (&regs->lr_vector[i], &xmm, sizeof (xmm)))
          abort ();

      /* Plant well-known values in the saved register state; pltexit
         checks below that they are still there on return.  */
      for (i = 0; i < 8; i += 2)
        {
          regs->lr_xmm[i] = (La_x86_64_xmm) _mm_set1_epi32 (i + 0x100);
          regs->lr_vector[i + 1].ymm[0]
            = (La_x86_64_ymm) _mm256_set1_epi32 (i + 0x101);
        }

      /* Clobber the live AVX registers; the dynamic linker must reload
         the saved state before entering audit_test.  */
      __m256i ymm = _mm256_set1_epi32 (-1);
      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
      asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm) : "xmm2" );
      asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm) : "xmm3" );
      asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm) : "xmm4" );
      asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm) : "xmm5" );
      asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm) : "xmm6" );
      asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm) : "xmm7" );

      /* Request a frame so that pltexit is invoked for this call.  */
      *framesizep = 1024;
    }
#endif

  return sym->st_value;
}
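
/* pltexit runs after audit_test has returned: it verifies that the values
   planted in pltenter are still in the saved entry state, rewrites the
   saved return value and clobbers live ymm registers again; none of this
   clobbering may leak into what the caller finally sees.  */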

unsigned int
pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
         uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
         const char *symname)
{
  printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
          symname, (long int) sym->st_value, ndx,
          (ptrdiff_t) outregs->int_retval);

#ifdef __AVX__
  if (check_avx () && strcmp (symname, "audit_test") == 0)
    {
      int i;

      /* The saved return-value registers are expected to be zero.  */
      __m128i xmm = _mm_setzero_si128 ();
      if (memcmp (&outregs->lrv_xmm0, &xmm, sizeof (xmm))
          || memcmp (&outregs->lrv_vector0, &xmm, sizeof (xmm)))
        abort ();

      __m256i ymm;

      /* The values planted in pltenter must still be present in the
         saved entry state.  */
      for (i = 0; i < 8; i += 2)
        {
          xmm = _mm_set1_epi32 (i + 0x100);
          if (memcmp (&inregs->lr_xmm[i], &xmm, sizeof (xmm))
              || memcmp (&inregs->lr_vector[i], &xmm, sizeof (xmm)))
            abort ();

          ymm = _mm256_set1_epi32 (i + 0x101);
          if (memcmp (&inregs->lr_xmm[i + 1],
                      &inregs->lr_vector[i + 1].xmm[0], sizeof (xmm))
              || memcmp (&inregs->lr_vector[i + 1], &ymm, sizeof (ymm)))
            abort ();
        }

      /* Rewrite the saved return value.  */
      outregs->lrv_vector0.ymm[0]
        = (La_x86_64_ymm) _mm256_set1_epi32 (0x98abcdef);

      /* Clobber ymm0/ymm1 again; the return value the caller sees must
         come from outregs, not from these registers.  */
      ymm = _mm256_set1_epi32 (-1);
      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );