Remove old autovect-branch by moving to "dead" directory.
[official-gcc.git] / old-autovect-branch / gcc / testsuite / gcc.c-torture / execute / builtins / mempcpy-chk.c
bloba59d59bd85deacf435c15bcd07510c8523269aef
1 /* Copyright (C) 2004, 2005 Free Software Foundation.
3 Ensure builtin __mempcpy_chk performs correctly. */
5 extern void abort (void);
6 typedef __SIZE_TYPE__ size_t;
7 extern size_t strlen(const char *);
8 extern void *memcpy (void *, const void *, size_t);
9 extern void *mempcpy (void *, const void *, size_t);
10 extern int memcmp (const void *, const void *, size_t);
12 #include "chk.h"
14 const char s1[] = "123";
15 char p[32] = "";
16 char *s2 = "defg";
17 char *s3 = "FGH";
18 size_t l1 = 1;
20 void
21 __attribute__((noinline))
22 test1 (void)
24 int i;
26 #if defined __i386__ || defined __x86_64__
27 /* The functions below might not be optimized into direct stores on all
28 arches. It depends on how many instructions would be generated and
29 what limits the architecture chooses in STORE_BY_PIECES_P. */
30 mempcpy_disallowed = 1;
31 #endif
33 /* All the mempcpy calls in this routine except last have fixed length, so
34 object size checking should be done at compile time if optimizing. */
35 chk_calls = 0;
37 if (mempcpy (p, "ABCDE", 6) != p + 6 || memcmp (p, "ABCDE", 6))
38 abort ();
39 if (mempcpy (p + 16, "VWX" + 1, 2) != p + 16 + 2
40 || memcmp (p + 16, "WX\0\0", 5))
41 abort ();
42 if (mempcpy (p + 1, "", 1) != p + 1 + 1 || memcmp (p, "A\0CDE", 6))
43 abort ();
44 if (mempcpy (p + 3, "FGHI", 4) != p + 3 + 4 || memcmp (p, "A\0CFGHI", 8))
45 abort ();
47 i = 8;
48 memcpy (p + 20, "qrstu", 6);
49 memcpy (p + 25, "QRSTU", 6);
50 if (mempcpy (p + 25 + 1, s1, 3) != (p + 25 + 1 + 3)
51 || memcmp (p + 25, "Q123U", 6))
52 abort ();
54 if (mempcpy (mempcpy (p, "abcdEFG", 4), "efg", 4) != p + 8
55 || memcmp (p, "abcdefg", 8))
56 abort();
58 /* Test at least one instance of the __builtin_ style. We do this
59 to ensure that it works and that the prototype is correct. */
60 if (__builtin_mempcpy (p, "ABCDE", 6) != p + 6 || memcmp (p, "ABCDE", 6))
61 abort ();
63 /* If the result of mempcpy is ignored, gcc should use memcpy.
64 This should be optimized always, so disallow mempcpy calls. */
65 mempcpy_disallowed = 1;
66 mempcpy (p + 5, s3, 1);
67 if (memcmp (p, "ABCDEFg", 8))
68 abort ();
70 if (chk_calls)
71 abort ();
72 chk_calls = 0;
74 mempcpy (p + 6, s1 + 1, l1);
75 if (memcmp (p, "ABCDEF2", 8))
76 abort ();
78 /* The above mempcpy copies into an object with known size, but
79 unknown length and with result ignored, so it should be a
80 __memcpy_chk call. */
81 if (chk_calls != 1)
82 abort ();
84 mempcpy_disallowed = 0;
/* Buffers for test2/test2_sub.  buf2 aliases the second half of buf1 so
   both halves can be addressed as char data.  */
long buf1[64];
char *buf2 = (char *) (buf1 + 32);
long buf5[20];
char buf7[20];
92 void
93 __attribute__((noinline))
94 test2_sub (long *buf3, char *buf4, char *buf6, int n)
96 int i = 0;
98 /* All the mempcpy/__builtin_mempcpy/__builtin___mempcpy_chk
99 calls in this routine are either fixed length, or have
100 side-effects in __builtin_object_size arguments, or
101 dst doesn't point into a known object. */
102 chk_calls = 0;
104 /* These should probably be handled by store_by_pieces on most arches. */
105 if (mempcpy (buf1, "ABCDEFGHI", 9) != (char *) buf1 + 9
106 || memcmp (buf1, "ABCDEFGHI\0", 11))
107 abort ();
109 if (mempcpy (buf1, "abcdefghijklmnopq", 17) != (char *) buf1 + 17
110 || memcmp (buf1, "abcdefghijklmnopq\0", 19))
111 abort ();
113 if (__builtin_mempcpy (buf3, "ABCDEF", 6) != (char *) buf1 + 6
114 || memcmp (buf1, "ABCDEFghijklmnopq\0", 19))
115 abort ();
117 if (__builtin_mempcpy (buf3, "a", 1) != (char *) buf1 + 1
118 || memcmp (buf1, "aBCDEFghijklmnopq\0", 19))
119 abort ();
121 if (mempcpy ((char *) buf3 + 2, "bcd" + ++i, 2) != (char *) buf1 + 4
122 || memcmp (buf1, "aBcdEFghijklmnopq\0", 19)
123 || i != 1)
124 abort ();
126 /* These should probably be handled by move_by_pieces on most arches. */
127 if (mempcpy ((char *) buf3 + 4, buf5, 6) != (char *) buf1 + 10
128 || memcmp (buf1, "aBcdRSTUVWklmnopq\0", 19))
129 abort ();
131 if (__builtin_mempcpy ((char *) buf1 + ++i + 8, (char *) buf5 + 1, 1)
132 != (char *) buf1 + 11
133 || memcmp (buf1, "aBcdRSTUVWSlmnopq\0", 19)
134 || i != 2)
135 abort ();
137 if (mempcpy ((char *) buf3 + 14, buf6, 2) != (char *) buf1 + 16
138 || memcmp (buf1, "aBcdRSTUVWSlmnrsq\0", 19))
139 abort ();
141 if (mempcpy (buf3, buf5, 8) != (char *) buf1 + 8
142 || memcmp (buf1, "RSTUVWXYVWSlmnrsq\0", 19))
143 abort ();
145 if (mempcpy (buf3, buf5, 17) != (char *) buf1 + 17
146 || memcmp (buf1, "RSTUVWXYZ01234567\0", 19))
147 abort ();
149 __builtin_memcpy (buf3, "aBcdEFghijklmnopq\0", 19);
151 /* These should be handled either by movmemendM or mempcpy
152 call. */
154 /* buf3 points to an unknown object, so __mempcpy_chk should not be done. */
155 if (mempcpy ((char *) buf3 + 4, buf5, n + 6) != (char *) buf1 + 10
156 || memcmp (buf1, "aBcdRSTUVWklmnopq\0", 19))
157 abort ();
159 /* This call has side-effects in dst, therefore no checking. */
160 if (__builtin___mempcpy_chk ((char *) buf1 + ++i + 8, (char *) buf5 + 1,
161 n + 1, os ((char *) buf1 + ++i + 8))
162 != (char *) buf1 + 12
163 || memcmp (buf1, "aBcdRSTUVWkSmnopq\0", 19)
164 || i != 3)
165 abort ();
167 if (mempcpy ((char *) buf3 + 14, buf6, n + 2) != (char *) buf1 + 16
168 || memcmp (buf1, "aBcdRSTUVWkSmnrsq\0", 19))
169 abort ();
171 i = 1;
173 /* These might be handled by store_by_pieces. */
174 if (mempcpy (buf2, "ABCDEFGHI", 9) != buf2 + 9
175 || memcmp (buf2, "ABCDEFGHI\0", 11))
176 abort ();
178 if (mempcpy (buf2, "abcdefghijklmnopq", 17) != buf2 + 17
179 || memcmp (buf2, "abcdefghijklmnopq\0", 19))
180 abort ();
182 if (__builtin_mempcpy (buf4, "ABCDEF", 6) != buf2 + 6
183 || memcmp (buf2, "ABCDEFghijklmnopq\0", 19))
184 abort ();
186 if (__builtin_mempcpy (buf4, "a", 1) != buf2 + 1
187 || memcmp (buf2, "aBCDEFghijklmnopq\0", 19))
188 abort ();
190 if (mempcpy (buf4 + 2, "bcd" + i++, 2) != buf2 + 4
191 || memcmp (buf2, "aBcdEFghijklmnopq\0", 19)
192 || i != 2)
193 abort ();
195 /* These might be handled by move_by_pieces. */
196 if (mempcpy (buf4 + 4, buf7, 6) != buf2 + 10
197 || memcmp (buf2, "aBcdRSTUVWklmnopq\0", 19))
198 abort ();
200 /* Side effect. */
201 if (__builtin___mempcpy_chk (buf2 + i++ + 8, buf7 + 1, 1,
202 os (buf2 + i++ + 8))
203 != buf2 + 11
204 || memcmp (buf2, "aBcdRSTUVWSlmnopq\0", 19)
205 || i != 3)
206 abort ();
208 if (mempcpy (buf4 + 14, buf6, 2) != buf2 + 16
209 || memcmp (buf2, "aBcdRSTUVWSlmnrsq\0", 19))
210 abort ();
212 __builtin_memcpy (buf4, "aBcdEFghijklmnopq\0", 19);
214 /* These should be handled either by movmemendM or mempcpy
215 call. */
216 if (mempcpy (buf4 + 4, buf7, n + 6) != buf2 + 10
217 || memcmp (buf2, "aBcdRSTUVWklmnopq\0", 19))
218 abort ();
220 /* Side effect. */
221 if (__builtin___mempcpy_chk (buf2 + i++ + 8, buf7 + 1,
222 n + 1, os (buf2 + i++ + 8))
223 != buf2 + 12
224 || memcmp (buf2, "aBcdRSTUVWkSmnopq\0", 19)
225 || i != 4)
226 abort ();
228 if (mempcpy (buf4 + 14, buf6, n + 2) != buf2 + 16
229 || memcmp (buf2, "aBcdRSTUVWkSmnrsq\0", 19))
230 abort ();
232 if (chk_calls)
233 abort ();
236 void
237 __attribute__((noinline))
238 test2 (void)
240 long *x;
241 char *y;
242 int z;
243 __builtin_memcpy (buf5, "RSTUVWXYZ0123456789", 20);
244 __builtin_memcpy (buf7, "RSTUVWXYZ0123456789", 20);
245 __asm ("" : "=r" (x) : "0" (buf1));
246 __asm ("" : "=r" (y) : "0" (buf2));
247 __asm ("" : "=r" (z) : "0" (0));
248 test2_sub (x, y, "rstuvwxyz", z);
251 volatile void *vx;
253 /* Test whether compile time checking is done where it should
254 and so is runtime object size checking. */
255 void
256 __attribute__((noinline))
257 test3 (void)
259 struct A { char buf1[10]; char buf2[10]; } a;
260 char *r = l1 == 1 ? &a.buf1[5] : &a.buf2[4];
261 char buf3[20];
262 int i;
263 size_t l;
265 /* The following calls should do runtime checking
266 - length is not known, but destination is. */
267 chk_calls = 0;
268 vx = mempcpy (a.buf1 + 2, s3, l1);
269 vx = mempcpy (r, s3, l1 + 1);
270 r = l1 == 1 ? __builtin_alloca (4) : &a.buf2[7];
271 vx = mempcpy (r, s2, l1 + 2);
272 vx = mempcpy (r + 2, s3, l1);
273 r = buf3;
274 for (i = 0; i < 4; ++i)
276 if (i == l1 - 1)
277 r = &a.buf1[1];
278 else if (i == l1)
279 r = &a.buf2[7];
280 else if (i == l1 + 1)
281 r = &buf3[5];
282 else if (i == l1 + 2)
283 r = &a.buf1[9];
285 vx = mempcpy (r, s2, l1);
286 if (chk_calls != 5)
287 abort ();
289 /* Following have known destination and known length,
290 so if optimizing certainly shouldn't result in the checking
291 variants. */
292 chk_calls = 0;
293 vx = mempcpy (a.buf1 + 2, s3, 1);
294 vx = mempcpy (r, s3, 2);
295 r = l1 == 1 ? __builtin_alloca (4) : &a.buf2[7];
296 vx = mempcpy (r, s2, 3);
297 r = buf3;
298 l = 4;
299 for (i = 0; i < 4; ++i)
301 if (i == l1 - 1)
302 r = &a.buf1[1], l = 2;
303 else if (i == l1)
304 r = &a.buf2[7], l = 3;
305 else if (i == l1 + 1)
306 r = &buf3[5], l = 4;
307 else if (i == l1 + 2)
308 r = &a.buf1[9], l = 1;
310 vx = mempcpy (r, s2, 1);
311 /* Here, l is known to be at most 4 and __builtin_object_size (&buf3[16], 0)
312 is 4, so this doesn't need runtime checking. */
313 vx = mempcpy (&buf3[16], s2, l);
314 if (chk_calls)
315 abort ();
316 chk_calls = 0;
319 /* Test whether runtime and/or compile time checking catches
320 buffer overflows. */
321 void
322 __attribute__((noinline))
323 test4 (void)
325 struct A { char buf1[10]; char buf2[10]; } a;
326 char buf3[20];
328 chk_fail_allowed = 1;
329 /* Runtime checks. */
330 if (__builtin_setjmp (chk_fail_buf) == 0)
332 vx = mempcpy (&a.buf2[9], s2, l1 + 1);
333 abort ();
335 if (__builtin_setjmp (chk_fail_buf) == 0)
337 vx = mempcpy (&a.buf2[7], s3, strlen (s3) + 1);
338 abort ();
340 /* This should be detectable at compile time already. */
341 if (__builtin_setjmp (chk_fail_buf) == 0)
343 vx = mempcpy (&buf3[19], "ab", 2);
344 abort ();
346 chk_fail_allowed = 0;
/* Parameters for the exhaustive offset/length sweep in test5; each may
   be overridden on the command line of the test harness.  */
#ifndef MAX_OFFSET
#define MAX_OFFSET (sizeof (long long))
#endif

#ifndef MAX_COPY
#define MAX_COPY (10 * sizeof (long long))
#endif

#ifndef MAX_EXTRA
#define MAX_EXTRA (sizeof (long long))
#endif

#define MAX_LENGTH (MAX_OFFSET + MAX_COPY + MAX_EXTRA)

/* Use a sequence length that is not divisible by two, to make it more
   likely to detect when words are mixed up.  */
#define SEQUENCE_LENGTH 31

/* Source/destination for test5, padded to MAX_LENGTH and overlaid with
   wide members to force strict alignment of buf.  */
static union {
  char buf[MAX_LENGTH];
  long long align_int;
  long double align_fp;
} u1, u2;
373 void
374 __attribute__((noinline))
375 test5 (void)
377 int off1, off2, len, i;
378 char *p, *q, c;
380 for (off1 = 0; off1 < MAX_OFFSET; off1++)
381 for (off2 = 0; off2 < MAX_OFFSET; off2++)
382 for (len = 1; len < MAX_COPY; len++)
384 for (i = 0, c = 'A'; i < MAX_LENGTH; i++, c++)
386 u1.buf[i] = 'a';
387 if (c >= 'A' + SEQUENCE_LENGTH)
388 c = 'A';
389 u2.buf[i] = c;
392 p = mempcpy (u1.buf + off1, u2.buf + off2, len);
393 if (p != u1.buf + off1 + len)
394 abort ();
396 q = u1.buf;
397 for (i = 0; i < off1; i++, q++)
398 if (*q != 'a')
399 abort ();
401 for (i = 0, c = 'A' + off2; i < len; i++, q++, c++)
403 if (c >= 'A' + SEQUENCE_LENGTH)
404 c = 'A';
405 if (*q != c)
406 abort ();
409 for (i = 0; i < MAX_EXTRA; i++, q++)
410 if (*q != 'a')
411 abort ();
/* Aligned fixed-size buffers for the constant-length sweep in test6.  */
#define TESTSIZE 80

char srcb[TESTSIZE] __attribute__ ((aligned));
char dstb[TESTSIZE] __attribute__ ((aligned));
/* Abort unless the first N bytes of TEST and MATCH are identical;
   noinline so the comparison can't be folded against the callers'
   constant data.  */
void
__attribute__((noinline))
check (char *test, char *match, int n)
{
  if (memcmp (test, match, n))
    abort ();
}
/* TN(n): clear dstb, mempcpy n bytes from srcb, verify the copy.
   T(n) runs TN at n, n+1, n+2 and n+3 to cover several residues of
   the length modulo the word size.  */
#define TN(n) \
{ memset (dstb, 0, n); vx = mempcpy (dstb, srcb, n); check (dstb, srcb, n); }
#define T(n) \
TN (n) \
TN ((n) + 1) \
TN ((n) + 2) \
TN ((n) + 3)
436 void
437 __attribute__((noinline))
438 test6 (void)
440 int i;
442 chk_calls = 0;
444 for (i = 0; i < sizeof (srcb); ++i)
445 srcb[i] = 'a' + i % 26;
447 T (0);
448 T (4);
449 T (8);
450 T (12);
451 T (16);
452 T (20);
453 T (24);
454 T (28);
455 T (32);
456 T (36);
457 T (40);
458 T (44);
459 T (48);
460 T (52);
461 T (56);
462 T (60);
463 T (64);
464 T (68);
465 T (72);
466 T (76);
468 /* All mempcpy calls in this routine have constant arguments. */
469 if (chk_calls)
470 abort ();
473 void
474 main_test (void)
476 #ifndef __OPTIMIZE__
477 /* Object size checking is only intended for -O[s123]. */
478 return;
479 #endif
480 __asm ("" : "=r" (l1) : "0" (l1));
481 test1 ();
482 test2 ();
483 test3 ();
484 test4 ();
485 test5 ();
486 test6 ();