/* Verify that changing AVX registers in audit library won't affect
   function parameter passing/return.  */
/* <link.h> provides struct link_map, Lmid_t and the La_* auditing types;
   the remaining headers cover printf/sprintf, memcmp/strcmp, abort and the
   integer types used below.  */
#include <link.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/wordsize.h>
#include <gnu/lib-names.h>
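/* This object is meant to be loaded through the dynamic linker's auditing
   interface (see rtld-audit(7)), along the lines of

       LD_AUDIT=./tst-auditmod.so ./tst-audit-prog

   where the file names are illustrative placeholders, not the real test
   names.  The hooks below log each auditing event; for the audited
   "audit_test" call, the PLT hooks additionally verify and deliberately
   rewrite the AVX argument/return registers to check that ld.so keeps
   auditing transparent to the application.  */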
/* Handshake with ld.so; la_version is the only hook an audit module is
   required to provide.  */
unsigned int
la_version (unsigned int v)
{
  printf ("version: %u\n", v);

  char buf[20];
  sprintf (buf, "%u", v);

  return v;
}
void
la_activity (uintptr_t *cookie, unsigned int flag)
{
  if (flag == LA_ACT_CONSISTENT)
    printf ("activity: consistent\n");
  else if (flag == LA_ACT_ADD)
    printf ("activity: add\n");
  else if (flag == LA_ACT_DELETE)
    printf ("activity: delete\n");
  else
    printf ("activity: unknown activity %u\n", flag);
}
char *
la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
{
  char buf[100];
  const char *flagstr;

  if (flag == LA_SER_ORIG)
    flagstr = "LA_SER_ORIG";
  else if (flag == LA_SER_LIBPATH)
    flagstr = "LA_SER_LIBPATH";
  else if (flag == LA_SER_RUNPATH)
    flagstr = "LA_SER_RUNPATH";
  else if (flag == LA_SER_CONFIG)
    flagstr = "LA_SER_CONFIG";
  else if (flag == LA_SER_DEFAULT)
    flagstr = "LA_SER_DEFAULT";
  else if (flag == LA_SER_SECURE)
    flagstr = "LA_SER_SECURE";
  else
    {
      sprintf (buf, "unknown flag %d", flag);
      flagstr = buf;
    }

  printf ("objsearch: %s, %s\n", name, flagstr);

  /* Returning the name unchanged lets the search proceed normally.  */
  return (char *) name;
}
unsigned int
la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
{
  printf ("objopen: %ld, %s\n", lmid, l->l_name);

  /* Audit bindings both to and from this object so that la_symbind64 and
     the PLT hooks below are invoked for it.  */
  return LA_FLG_BINDTO | LA_FLG_BINDFROM;
}
void
la_preinit (uintptr_t *cookie)
{
  printf ("preinit\n");
}
unsigned int
la_objclose (uintptr_t *cookie)
{
  printf ("objclose\n");

  return 0;
}
uintptr_t
la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
              uintptr_t *defcook, unsigned int *flags, const char *symname)
{
  printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
          symname, (long int) sym->st_value, ndx, *flags);

  /* Leave the binding unchanged.  */
  return sym->st_value;
}
#include <tst-audit.h>
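/* tst-audit.h is a test-internal header; it is assumed to map the generic
   pltenter/pltexit names and the La_regs/La_retval types used below onto
   the x86-64 hooks (la_x86_64_gnu_pltenter/la_x86_64_gnu_pltexit) and
   their register structures.  */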
#ifdef __AVX__
#include <immintrin.h>
#include <cpuid.h>

static int avx = -1;

static int
__attribute ((always_inline))
check_avx (void)
{
  if (avx == -1)
    {
      unsigned int eax, ebx, ecx, edx;

      /* AVX support is reported in CPUID leaf 1, ECX bit 28 (bit_AVX).  */
      if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
          && (ecx & bit_AVX))
        avx = 1;
      else
        avx = 0;
    }

  return avx;
}
#else
#include <emmintrin.h>
#endif
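/* In the La_regs (La_x86_64_regs) structure handed to the PLT hooks,
   lr_xmm[] holds the 128-bit XMM view of the vector argument registers and
   lr_vector[] the wider YMM view of the same registers; the checks below
   compare both views.  */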
ElfW(Addr)
pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
          uintptr_t *defcook, La_regs *regs, unsigned int *flags,
          const char *symname, long int *framesizep)
{
  printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
          symname, (long int) sym->st_value, ndx, *flags);

#ifdef __AVX__
  if (check_avx () && strcmp (symname, "audit_test") == 0)
    {
      __m128i xmm;
      __m256i ymm;
      int i;

      for (i = 0; i < 8; i += 2)
        {
          /* Even argument slots arrive as XMM values of i + 1.  */
          xmm = _mm_set1_epi32 (i + 1);
          if (memcmp (&regs->lr_xmm[i], &xmm, sizeof (xmm))
              || memcmp (&regs->lr_vector[i], &xmm, sizeof (xmm)))
            abort ();

          regs->lr_xmm[i] = (La_x86_64_xmm) _mm_set1_epi32 (i + 0x100);
          regs->lr_vector[i].xmm[0] = regs->lr_xmm[i];

          /* Odd argument slots arrive as YMM values of i + 2.  */
          ymm = _mm256_set1_epi32 (i + 2);
          if (memcmp (&regs->lr_xmm[i + 1],
                      &regs->lr_vector[i + 1].xmm[0], sizeof (xmm))
              || memcmp (&regs->lr_vector[i + 1], &ymm, sizeof (ymm)))
            abort ();

          regs->lr_vector[i + 1].ymm[0]
            = (La_x86_64_ymm) _mm256_set1_epi32 (i + 0x101);
          regs->lr_xmm[i + 1] = regs->lr_vector[i + 1].xmm[0];
        }

      /* Clobber the vector registers; per the comment at the top of the
         file, this must not disturb the parameters audit_test receives.  */
      ymm = _mm256_set1_epi32 (-1);
      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
      asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm) : "xmm2" );
      asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm) : "xmm3" );
      asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm) : "xmm4" );
      asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm) : "xmm5" );
      asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm) : "xmm6" );
      asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm) : "xmm7" );

      /* pltexit below is only invoked if a frame size is requested here;
         the exact value just needs to cover audit_test's arguments.  */
      *framesizep = 1024;
    }
#endif

  return sym->st_value;
}
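/* Taken together (derived from the checks in this file): argument slot k of
   the audited call carries the value k + 1 splat across the register,
   checked at XMM width for even slots and YMM width for odd slots, and the
   call returns a YMM value of 0x12349876.  pltenter rewrites saved slot k
   to k + 0x100, and pltexit expects exactly those rewritten values in
   inregs before replacing the saved return value with 0x98abcdef.  Per the
   comment at the top of the file, changing the AVX registers here must not
   affect the function's parameter passing or return value.  */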
unsigned int
pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
         uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
         const char *symname)
{
  printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
          symname, (long int) sym->st_value, ndx,
          (ptrdiff_t) outregs->int_retval);

#ifdef __AVX__
  if (check_avx () && strcmp (symname, "audit_test") == 0)
    {
      __m128i xmm;
      int i;

      /* audit_test is expected to return this YMM value.  */
      __m256i ymm = _mm256_set1_epi32 (0x12349876);
      if (memcmp (&outregs->lrv_vector0, &ymm, sizeof (ymm)))
        abort ();

      for (i = 0; i < 8; i += 2)
        {
          /* The saved argument registers must still hold the values
             written by pltenter.  */
          xmm = _mm_set1_epi32 (i + 0x100);
          if (memcmp (&inregs->lr_xmm[i], &xmm, sizeof (xmm))
              || memcmp (&inregs->lr_vector[i], &xmm, sizeof (xmm)))
            abort ();

          ymm = _mm256_set1_epi32 (i + 0x101);
          if (memcmp (&inregs->lr_xmm[i + 1],
                      &inregs->lr_vector[i + 1].xmm[0], sizeof (xmm))
              || memcmp (&inregs->lr_vector[i + 1], &ymm, sizeof (ymm)))
            abort ();
        }

      /* Overwrite the saved return value and clobber the registers on the
         way out as well.  */
      outregs->lrv_vector0.ymm[0]
        = (La_x86_64_ymm) _mm256_set1_epi32 (0x98abcdef);

      ymm = _mm256_set1_epi32 (-1);
      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );