/* Source: gcc/testsuite/c-c++-common/gomp/pr61486-2.c (official-gcc),
   blob c86fd9184942c4b0bf85f227b0b2b7ed7bee6525; scraped from a gitweb
   page whose commit header read "c++: prvalue of array type [PR111286]".  */
/* PR middle-end/61486 */
/* { dg-do compile } */
/* { dg-require-effective-target alloca } */
/* dosomething is only declared here; it is marked with declare target so
   that it may be called from inside the target regions below.  */
#pragma omp declare target
void dosomething (int *a, int n, int m);
#pragma omp end declare target
/* Compile-only stress test for PR middle-end/61486: exercises the
   combined and nested target/teams/distribute/parallel for/simd
   constructs with device, if, map, num_teams, thread_limit, collapse,
   dist_schedule, num_threads, proc_bind, schedule, safelen, linear and
   aligned clauses.  Nothing here is executed; it only has to parse and
   gimplify cleanly.  */
void
test (int n, int o, int p, int q, int r, int s, int *pp)
{
  int a[o], i, j;
  /* Everything below sits inside one target data region.  */
  #pragma omp target data device (n + 1) if (n != 6) map (tofrom: n, r)
  {
    #pragma omp target device (n + 1) if (n != 6) map (from: n) map (alloc: a[2:o-2])
    dosomething (a, n, 0);
    #pragma omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r)
    {
      r = r + 1;
      p = q;
      dosomething (a, n, p + q);
    }
    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
        }
    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
        }
    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
        num_threads (n + 4) proc_bind (spread) lastprivate (s) \
        schedule (static, 8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
          p = q;
          s = i * 10 + j;
        }
    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
        proc_bind (master) lastprivate (s) schedule (static, 8)
    for (i = 0; i < 10; i++)
      {
        for (j = 0; j < 10; j++)
          {
            r = r + 1;
            p = q;
            dosomething (a, n, p + q);
          }
        p = q;
        s = i * 10;
      }
    #pragma omp target teams distribute parallel for simd device (n + 1) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
        num_threads (n + 4) proc_bind (spread) lastprivate (s) \
        schedule (static, 8) num_teams (n + 4) safelen(8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          a[2+i*10+j] = p + q;
          s = i * 10 + j;
        }
    #pragma omp target teams distribute parallel for simd device (n + 1) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
        proc_bind (master) lastprivate (s) schedule (static, 8) \
        num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
    #pragma omp target teams distribute simd device (n + 1) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
        lastprivate (s) num_teams (n + 4) safelen(8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          a[2+i*10+j] = p + q;
          s = i * 10 + j;
        }
    #pragma omp target teams distribute simd device (n + 1) \
        if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
        num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
    /* Now the same constructs split into separate target + teams...
       directives instead of the combined forms.  */
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r)
    {
      r = r + 1;
      p = q;
      dosomething (a, n, p + q);
    }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute num_teams (n + 4) collapse (2) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
        }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute num_teams (n + 4) default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
        }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
        default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
        num_threads (n + 4) proc_bind (spread) lastprivate (s) \
        schedule (static, 8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
          p = q;
          s = i * 10 + j;
        }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
        default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
        proc_bind (master) lastprivate (s) schedule (static, 8)
    for (i = 0; i < 10; i++)
      {
        for (j = 0; j < 10; j++)
          {
            r = r + 1;
            p = q;
            dosomething (a, n, p + q);
          }
        p = q;
        s = i * 10;
      }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
        num_threads (n + 4) proc_bind (spread) lastprivate (s) \
        schedule (static, 8) num_teams (n + 4) safelen(8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          a[2+i*10+j] = p + q;
          s = i * 10 + j;
        }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
        proc_bind (master) lastprivate (s) schedule (static, 8) \
        num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
    /* proc_bind (primary) is the OpenMP 5.1 spelling of master.  */
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
        proc_bind (primary) lastprivate (s) schedule (static, 8) \
        num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute simd default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
        lastprivate (s) num_teams (n + 4) safelen(8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          a[2+i*10+j] = p + q;
          s = i * 10 + j;
        }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute simd default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
        num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
    /* Finally, combined target teams with a separate distribute...
       directive as its body.  */
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2)default(shared) shared(n) \
        private (p) reduction (+: r)
    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
        }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2) shared(n) private(p) reduction (+ : r) \
        default(shared)
    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
        }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for if (n != 6) \
        default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
        collapse (2) dist_schedule (static, 4) \
        num_threads (n + 4) proc_bind (spread) lastprivate (s) \
        schedule (static, 8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          dosomething (a, n, p + q);
          p = q;
          s = i * 10 + j;
        }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for if (n != 6) \
        default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
        num_threads (n + 4) dist_schedule (static, 4) \
        proc_bind (master) lastprivate (s) schedule (static, 8)
    for (i = 0; i < 10; i++)
      {
        for (j = 0; j < 10; j++)
          {
            r = r + 1;
            p = q;
            dosomething (a, n, p + q);
          }
        p = q;
        s = i * 10;
      }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        collapse (2) dist_schedule (static, 4) \
        num_threads (n + 4) proc_bind (spread) lastprivate (s) \
        schedule (static, 8) safelen(8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          a[2+i*10+j] = p + q;
          s = i * 10 + j;
        }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
        private (p) firstprivate (q) shared (n) reduction (+: r) \
        num_threads (n + 4) dist_schedule (static, 4) \
        proc_bind (master) lastprivate (s) schedule (static, 8) \
        safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
        reduction(+:r)
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
        collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        {
          r = r + 1;
          p = q;
          a[2+i*10+j] = p + q;
          s = i * 10 + j;
        }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
        num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
        reduction(+:r)
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
        lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
    for (i = 0; i < 10; i++)
      {
        r = r + 1;
        p = q;
        a[2+i] = p + q;
        s = i * 10;
      }
  }
}
/* File-scope variables used by test2 below.  s is placed inside the
   declare target region so it is a valid lastprivate/shared item in the
   orphaned distribute constructs of test2.  */
int q, i, j;

#pragma omp declare target
int s;
373 void
374 test2 (int n, int o, int p, int r, int *pp)
376 int a[o];
377 #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
378 for (i = 0; i < 10; i++)
379 for (j = 0; j < 10; j++)
381 r = r + 1;
382 p = q;
383 dosomething (a, n, p + q);
385 #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
386 for (i = 0; i < 10; i++)
387 for (j = 0; j < 10; j++)
389 r = r + 1;
390 p = q;
391 dosomething (a, n, p + q);
393 #pragma omp distribute parallel for if (n != 6) \
394 default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
395 collapse (2) dist_schedule (static, 4) \
396 num_threads (n + 4) proc_bind (spread) lastprivate (s) \
397 schedule (static, 8)
398 for (i = 0; i < 10; i++)
399 for (j = 0; j < 10; j++)
401 r = r + 1;
402 p = q;
403 dosomething (a, n, p + q);
404 p = q;
405 s = i * 10 + j;
407 #pragma omp distribute parallel for if (n != 6) \
408 default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
409 num_threads (n + 4) dist_schedule (static, 4) \
410 proc_bind (master) lastprivate (s) schedule (static, 8)
411 for (i = 0; i < 10; i++)
413 for (j = 0; j < 10; j++)
415 r = r + 1;
416 p = q;
417 dosomething (a, n, p + q);
419 p = q;
420 s = i * 10;
422 #pragma omp distribute parallel for simd if (n != 6)default(shared) \
423 private (p) firstprivate (q) shared (n) reduction (+: r) \
424 collapse (2) dist_schedule (static, 4) \
425 num_threads (n + 4) proc_bind (spread) lastprivate (s) \
426 schedule (static, 8) safelen(8)
427 for (i = 0; i < 10; i++)
428 for (j = 0; j < 10; j++)
430 r = r + 1;
431 p = q;
432 a[2+i*10+j] = p + q;
433 s = i * 10 + j;
435 #pragma omp distribute parallel for simd if (n != 6)default(shared) \
436 private (p) firstprivate (q) shared (n) reduction (+: r) \
437 num_threads (n + 4) dist_schedule (static, 4) \
438 proc_bind (master) lastprivate (s) schedule (static, 8) \
439 safelen(16) linear(i:1) aligned (pp:4)
440 for (i = 0; i < 10; i++)
442 r = r + 1;
443 p = q;
444 a[2+i] = p + q;
445 s = i * 10;
447 #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
448 collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
449 for (i = 0; i < 10; i++)
450 for (j = 0; j < 10; j++)
452 r = r + 1;
453 p = q;
454 a[2+i*10+j] = p + q;
455 s = i * 10 + j;
457 #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
458 lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
459 for (i = 0; i < 10; i++)
461 r = r + 1;
462 p = q;
463 a[2+i] = p + q;
464 s = i * 10;
467 #pragma omp end declare target