PR rtl-optimization/88470
[official-gcc.git] / gcc / c-family / c-omp.c
blobc7d44560da5081b6fc819f15fb724218bd3b6398
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2 called from parsing in the C and C++ front ends.
4 Copyright (C) 2005-2018 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "options.h"
28 #include "c-common.h"
29 #include "gimple-expr.h"
30 #include "c-pragma.h"
31 #include "stringpool.h"
32 #include "omp-general.h"
33 #include "gomp-constants.h"
34 #include "memmodel.h"
37 /* Complete a #pragma oacc wait construct. LOC is the location of
38 the #pragma. */
40 tree
41 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
43 const int nparms = list_length (parms);
44 tree stmt, t;
45 vec<tree, va_gc> *args;
47 vec_alloc (args, nparms + 2);
48 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
50 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
51 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
52 else
53 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
55 args->quick_push (t);
56 args->quick_push (build_int_cst (integer_type_node, nparms));
58 for (t = parms; t; t = TREE_CHAIN (t))
60 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
61 args->quick_push (build_int_cst (integer_type_node,
62 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
63 else
64 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
67 stmt = build_call_expr_loc_vec (loc, stmt, args);
69 vec_free (args);
71 return stmt;
74 /* Complete a #pragma omp master construct. STMT is the structured-block
75 that follows the pragma. LOC is the location of the #pragma. */
77 tree
78 c_finish_omp_master (location_t loc, tree stmt)
80 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
81 SET_EXPR_LOCATION (t, loc);
82 return t;
85 /* Complete a #pragma omp taskgroup construct. BODY is the structured-block
86 that follows the pragma. LOC is the location of the #pragma. */
88 tree
89 c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
91 tree stmt = make_node (OMP_TASKGROUP);
92 TREE_TYPE (stmt) = void_type_node;
93 OMP_TASKGROUP_BODY (stmt) = body;
94 OMP_TASKGROUP_CLAUSES (stmt) = clauses;
95 SET_EXPR_LOCATION (stmt, loc);
96 return add_stmt (stmt);
99 /* Complete a #pragma omp critical construct. BODY is the structured-block
100 that follows the pragma, NAME is the identifier in the pragma, or null
101 if it was omitted. LOC is the location of the #pragma. */
103 tree
104 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
106 tree stmt = make_node (OMP_CRITICAL);
107 TREE_TYPE (stmt) = void_type_node;
108 OMP_CRITICAL_BODY (stmt) = body;
109 OMP_CRITICAL_NAME (stmt) = name;
110 OMP_CRITICAL_CLAUSES (stmt) = clauses;
111 SET_EXPR_LOCATION (stmt, loc);
112 return add_stmt (stmt);
115 /* Complete a #pragma omp ordered construct. STMT is the structured-block
116 that follows the pragma. LOC is the location of the #pragma. */
118 tree
119 c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
121 tree t = make_node (OMP_ORDERED);
122 TREE_TYPE (t) = void_type_node;
123 OMP_ORDERED_BODY (t) = stmt;
124 if (!flag_openmp /* flag_openmp_simd */
125 && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
126 || OMP_CLAUSE_CHAIN (clauses)))
127 clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
128 OMP_ORDERED_CLAUSES (t) = clauses;
129 SET_EXPR_LOCATION (t, loc);
130 return add_stmt (t);
134 /* Complete a #pragma omp barrier construct. LOC is the location of
135 the #pragma. */
137 void
138 c_finish_omp_barrier (location_t loc)
140 tree x;
142 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
143 x = build_call_expr_loc (loc, x, 0);
144 add_stmt (x);
148 /* Complete a #pragma omp taskwait construct. LOC is the location of the
149 pragma. */
151 void
152 c_finish_omp_taskwait (location_t loc)
154 tree x;
156 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
157 x = build_call_expr_loc (loc, x, 0);
158 add_stmt (x);
162 /* Complete a #pragma omp taskyield construct. LOC is the location of the
163 pragma. */
165 void
166 c_finish_omp_taskyield (location_t loc)
168 tree x;
170 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
171 x = build_call_expr_loc (loc, x, 0);
172 add_stmt (x);
176 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
177 the expression to be implemented atomically is LHS opcode= RHS.
178 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
179 opcode= RHS with the new or old content of LHS returned.
180 LOC is the location of the atomic statement. The value returned
181 is either error_mark_node (if the construct was erroneous) or an
182 OMP_ATOMIC* node which should be added to the current statement
183 tree with add_stmt. If TEST is set, avoid calling save_expr
184 or create_tmp_var*. */
186 tree
187 c_finish_omp_atomic (location_t loc, enum tree_code code,
188 enum tree_code opcode, tree lhs, tree rhs,
189 tree v, tree lhs1, tree rhs1, bool swapped,
190 enum omp_memory_order memory_order, bool test)
192 tree x, type, addr, pre = NULL_TREE;
193 HOST_WIDE_INT bitpos = 0, bitsize = 0;
/* Bail out early if any operand already failed semantic analysis.  */
195 if (lhs == error_mark_node || rhs == error_mark_node
196 || v == error_mark_node || lhs1 == error_mark_node
197 || rhs1 == error_mark_node)
198 return error_mark_node;
200 /* ??? According to one reading of the OpenMP spec, complex type are
201 supported, but there are no atomic stores for any architecture.
202 But at least icc 9.0 doesn't support complex types here either.
203 And lets not even talk about vector types... */
204 type = TREE_TYPE (lhs);
205 if (!INTEGRAL_TYPE_P (type)
206 && !POINTER_TYPE_P (type)
207 && !SCALAR_FLOAT_TYPE_P (type))
209 error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
210 return error_mark_node;
212 if (TYPE_ATOMIC (type))
214 error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
215 return error_mark_node;
218 if (opcode == RDIV_EXPR)
219 opcode = TRUNC_DIV_EXPR;
220 /* NOTE(review): RDIV_EXPR is folded into TRUNC_DIV_EXPR here;
   presumably the callers distinguish them only for diagnostics — confirm.  */
221 /* ??? Validate that rhs does not overlap lhs. */
/* If LHS is a C bit-field with a representative FIELD_DECL, rewrite the
   access to go through the representative and remember the original in
   BLHS, along with the bit position/size inside the representative.  */
222 tree blhs = NULL;
223 if (TREE_CODE (lhs) == COMPONENT_REF
224 && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
225 && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
226 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
228 tree field = TREE_OPERAND (lhs, 1);
229 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
230 if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
231 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
232 bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
233 - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
234 else
235 bitpos = 0;
236 bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
237 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
238 gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
239 bitsize = tree_to_shwi (DECL_SIZE (field));
240 blhs = lhs;
241 type = TREE_TYPE (repr);
242 lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
243 repr, TREE_OPERAND (lhs, 2));
246 /* Take and save the address of the lhs. From then on we'll reference it
247 via indirection. */
248 addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
249 if (addr == error_mark_node)
250 return error_mark_node;
251 if (!test)
252 addr = save_expr (addr);
253 if (!test
254 && TREE_CODE (addr) != SAVE_EXPR
255 && (TREE_CODE (addr) != ADDR_EXPR
256 || !VAR_P (TREE_OPERAND (addr, 0))))
258 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
259 it even after unsharing function body. */
260 tree var = create_tmp_var_raw (TREE_TYPE (addr));
261 DECL_CONTEXT (var) = current_function_decl;
262 addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
264 tree orig_lhs = lhs;
265 lhs = build_indirect_ref (loc, addr, RO_NULL);
266 tree new_lhs = lhs;
/* Atomic read: V = *ADDR, with a BIT_FIELD_REF extraction when the
   target was a bit-field.  */
268 if (code == OMP_ATOMIC_READ)
270 x = build1 (OMP_ATOMIC_READ, type, addr);
271 SET_EXPR_LOCATION (x, loc);
272 OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
273 if (blhs)
274 x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
275 bitsize_int (bitsize), bitsize_int (bitpos));
276 return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
277 loc, x, NULL_TREE);
280 /* There are lots of warnings, errors, and conversions that need to happen
281 in the course of interpreting a statement. Use the normal mechanisms
282 to do this, and then take it apart again. */
283 if (blhs)
285 lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
286 bitsize_int (bitsize), bitsize_int (bitpos));
287 if (swapped)
288 rhs = build_binary_op (loc, opcode, rhs, lhs, true);
289 else if (opcode != NOP_EXPR)
290 rhs = build_binary_op (loc, opcode, lhs, rhs, true);
291 opcode = NOP_EXPR;
293 else if (swapped)
295 rhs = build_binary_op (loc, opcode, rhs, lhs, true);
296 opcode = NOP_EXPR;
/* Build the full assignment through the front end so all the usual
   conversions and diagnostics fire, then take it apart again below.  */
298 bool save = in_late_binary_op;
299 in_late_binary_op = true;
300 x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
301 loc, rhs, NULL_TREE);
302 in_late_binary_op = save;
303 if (x == error_mark_node)
304 return error_mark_node;
305 if (TREE_CODE (x) == COMPOUND_EXPR)
307 pre = TREE_OPERAND (x, 0)/* pre-evaluation side effect, re-attached at the end */;
308 gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
309 x = TREE_OPERAND (x, 1);
311 gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
312 rhs = TREE_OPERAND (x, 1);
314 if (blhs)
315 rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
316 rhs, bitsize_int (bitpos));
318 /* Punt the actual generation of atomic operations to common code. */
319 if (code == OMP_ATOMIC)
320 type = void_type_node;
321 x = build2 (code, type, addr, rhs);
322 SET_EXPR_LOCATION (x, loc);
323 OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
325 /* Generally it is hard to prove lhs1 and lhs are the same memory
326 location, just diagnose different variables. */
327 if (rhs1
328 && VAR_P (rhs1)
329 && VAR_P (orig_lhs)
330 && rhs1 != orig_lhs
331 && !test)
333 if (code == OMP_ATOMIC)
334 error_at (loc, "%<#pragma omp atomic update%> uses two different "
335 "variables for memory");
336 else
337 error_at (loc, "%<#pragma omp atomic capture%> uses two different "
338 "variables for memory");
339 return error_mark_node;
/* Mirror the bit-field representative rewrite on LHS1/RHS1 so the
   comparisons against the (rewritten) LHS below stay meaningful.  */
342 if (lhs1
343 && lhs1 != orig_lhs
344 && TREE_CODE (lhs1) == COMPONENT_REF
345 && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
346 && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
347 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
349 tree field = TREE_OPERAND (lhs1, 1);
350 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
351 lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
352 repr, TREE_OPERAND (lhs1, 2));
354 if (rhs1
355 && rhs1 != orig_lhs
356 && TREE_CODE (rhs1) == COMPONENT_REF
357 && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
358 && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
359 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
361 tree field = TREE_OPERAND (rhs1, 1);
362 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
363 rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
364 repr, TREE_OPERAND (rhs1, 2));
/* Capture forms: store the (old or new) value into V, threading in the
   lhs1/rhs1 side expressions without evaluating them twice.  */
367 if (code != OMP_ATOMIC)
369 /* Generally it is hard to prove lhs1 and lhs are the same memory
370 location, just diagnose different variables. */
371 if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
373 if (lhs1 != orig_lhs && !test)
375 error_at (loc, "%<#pragma omp atomic capture%> uses two "
376 "different variables for memory");
377 return error_mark_node;
380 if (blhs)
381 x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
382 bitsize_int (bitsize), bitsize_int (bitpos));
383 x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
384 loc, x, NULL_TREE);
385 if (rhs1 && rhs1 != orig_lhs)
387 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
388 if (rhs1addr == error_mark_node)
389 return error_mark_node;
390 x = omit_one_operand_loc (loc, type, x, rhs1addr);
392 if (lhs1 && lhs1 != orig_lhs)
394 tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
395 if (lhs1addr == error_mark_node)
396 return error_mark_node;
397 if (code == OMP_ATOMIC_CAPTURE_OLD)
398 x = omit_one_operand_loc (loc, type, x, lhs1addr);
399 else
401 if (!test)
402 x = save_expr (x);
403 x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
407 else if (rhs1 && rhs1 != orig_lhs)
409 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
410 if (rhs1addr == error_mark_node)
411 return error_mark_node;
412 x = omit_one_operand_loc (loc, type, x, rhs1addr);
/* Re-attach any pre-evaluation side effect peeled off above.  */
415 if (pre)
416 x = omit_one_operand_loc (loc, type, x, pre);
417 return x;
421 /* Return true if TYPE is the implementation's omp_depend_t. */
423 bool
424 c_omp_depend_t_p (tree type)
426 type = TYPE_MAIN_VARIANT (type);
427 return (TREE_CODE (type) == RECORD_TYPE
428 && TYPE_NAME (type)
429 && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
430 ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
431 == get_identifier ("omp_depend_t"))
432 && (!TYPE_CONTEXT (type)
433 || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
434 && COMPLETE_TYPE_P (type)
435 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
436 && !compare_tree_int (TYPE_SIZE (type),
437 2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
441 /* Complete a #pragma omp depobj construct. LOC is the location of the
442 #pragma. */
444 void
445 c_finish_omp_depobj (location_t loc, tree depobj,
446 enum omp_clause_depend_kind kind, tree clause)
448 tree t = NULL_TREE;
/* Validate the depobj expression itself: it must have type omp_depend_t
   and must not be const-qualified.  */
449 if (!error_operand_p (depobj))
451 if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
453 error_at (EXPR_LOC_OR_LOC (depobj, loc),
454 "type of %<depobj%> expression is not %<omp_depend_t%>");
455 depobj = error_mark_node;
457 else if (TYPE_READONLY (TREE_TYPE (depobj)))
459 error_at (EXPR_LOC_OR_LOC (depobj, loc),
460 "%<const%> qualified %<depobj%> expression");
461 depobj = error_mark_node;
464 else
465 depobj = error_mark_node;
467 if (clause == error_mark_node)
468 return;
/* Validate the depend clause (present for depobj/update forms): exactly
   one locator, a dependence kind usable here, no iterator modifier.
   On success T becomes the address of the dependence locator.  */
470 if (clause)
472 gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
473 && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
474 if (OMP_CLAUSE_CHAIN (clause))
475 error_at (OMP_CLAUSE_LOCATION (clause),
476 "more than one locator in %<depend%> clause on %<depobj%> "
477 "construct");
478 switch (OMP_CLAUSE_DEPEND_KIND (clause))
480 case OMP_CLAUSE_DEPEND_DEPOBJ:
481 error_at (OMP_CLAUSE_LOCATION (clause),
482 "%<depobj%> dependence type specified in %<depend%> "
483 "clause on %<depobj%> construct");
484 return;
485 case OMP_CLAUSE_DEPEND_SOURCE:
486 case OMP_CLAUSE_DEPEND_SINK:
487 error_at (OMP_CLAUSE_LOCATION (clause),
488 "%<depend(%s)%> is only allowed in %<omp ordered%>",
489 OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
490 ? "source" : "sink");
491 return;
492 case OMP_CLAUSE_DEPEND_IN:
493 case OMP_CLAUSE_DEPEND_OUT:
494 case OMP_CLAUSE_DEPEND_INOUT:
495 case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
496 kind = OMP_CLAUSE_DEPEND_KIND (clause);
497 t = OMP_CLAUSE_DECL (clause);
498 gcc_assert (t);
499 if (TREE_CODE (t) == TREE_LIST
500 && TREE_PURPOSE (t)
501 && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
503 error_at (OMP_CLAUSE_LOCATION (clause),
504 "%<iterator%> modifier may not be specified on "
505 "%<depobj%> construct");
506 return;
508 if (TREE_CODE (t) == COMPOUND_EXPR)
510 tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
511 t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
512 t1)/* keep the compound's side effects, take the address of its value */;
514 else
515 t = build_fold_addr_expr (t);
516 break;
517 default:
518 gcc_unreachable ();
521 else
522 gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);
524 if (depobj == error_mark_node)
525 return;
/* View the depobj as an array of two pointers: slot 0 holds the locator
   address, slot 1 the GOMP_DEPEND_* kind (or -1 for destroy).  */
527 depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
528 tree dtype
529 = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
530 true);
531 depobj = fold_convert (dtype, depobj);
532 tree r;
533 if (clause)
535 depobj = save_expr (depobj);
536 r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
537 add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
/* Map the front-end dependence kind onto the libgomp GOMP_DEPEND_*
   encoding; OMP_CLAUSE_DEPEND_LAST (destroy form) stores -1.  */
539 int k;
540 switch (kind)
542 case OMP_CLAUSE_DEPEND_IN:
543 k = GOMP_DEPEND_IN;
544 break;
545 case OMP_CLAUSE_DEPEND_OUT:
546 k = GOMP_DEPEND_OUT;
547 break;
548 case OMP_CLAUSE_DEPEND_INOUT:
549 k = GOMP_DEPEND_INOUT;
550 break;
551 case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
552 k = GOMP_DEPEND_MUTEXINOUTSET;
553 break;
554 case OMP_CLAUSE_DEPEND_LAST:
555 k = -1;
556 break;
557 default:
558 gcc_unreachable ();
560 t = build_int_cst (ptr_type_node, k);
561 depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
562 TYPE_SIZE_UNIT (ptr_type_node));
563 r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
564 add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
568 /* Complete a #pragma omp flush construct. We don't do anything with
569 the variable list that the syntax allows. LOC is the location of
570 the #pragma. */
572 void
573 c_finish_omp_flush (location_t loc, int mo)
575 tree x;
577 if (mo == MEMMODEL_LAST)
579 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
580 x = build_call_expr_loc (loc, x, 0);
582 else
584 x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
585 x = build_call_expr_loc (loc, x, 1,
586 build_int_cst (integer_type_node, mo));
588 add_stmt (x);
592 /* Check and canonicalize OMP_FOR increment expression.
593 Helper function for c_finish_omp_for. */
595 static tree
596 check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
598 tree t;
/* Reject non-integral increments and any form that could narrow the
   iteration variable's precision.  */
600 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
601 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
602 return error_mark_node;
/* Base case: the expression IS the iteration variable, so the step
   contributed by this subexpression is zero.  */
604 if (exp == decl)
605 return build_int_cst (TREE_TYPE (exp), 0);
/* Recursively peel conversions and +/- around DECL, rebuilding the
   expression with DECL replaced by its zero contribution; any shape not
   matched falls through to the error_mark_node at the end.  */
607 switch (TREE_CODE (exp))
609 CASE_CONVERT:
610 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
611 if (t != error_mark_node)
612 return fold_convert_loc (loc, TREE_TYPE (exp), t);
613 break;
614 case MINUS_EXPR:
615 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
616 if (t != error_mark_node)
617 return fold_build2_loc (loc, MINUS_EXPR,
618 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
619 break;
620 case PLUS_EXPR:
621 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
622 if (t != error_mark_node)
623 return fold_build2_loc (loc, PLUS_EXPR,
624 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
625 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
626 if (t != error_mark_node)
627 return fold_build2_loc (loc, PLUS_EXPR,
628 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
629 break;
630 case COMPOUND_EXPR:
632 /* cp_build_modify_expr forces preevaluation of the RHS to make
633 sure that it is evaluated before the lvalue-rvalue conversion
634 is applied to the LHS. Reconstruct the original expression. */
635 tree op0 = TREE_OPERAND (exp, 0);
636 if (TREE_CODE (op0) == TARGET_EXPR
637 && !VOID_TYPE_P (TREE_TYPE (op0)))
639 tree op1 = TREE_OPERAND (exp, 1);
640 tree temp = TARGET_EXPR_SLOT (op0);
641 if (BINARY_CLASS_P (op1)
642 && TREE_OPERAND (op1, 1) == temp)
644 op1 = copy_node (op1);
645 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
646 return check_omp_for_incr_expr (loc, op1, decl);
649 break;
651 default:
652 break;
/* Unrecognized increment shape.  */
655 return error_mark_node;
658 /* If the OMP_FOR increment expression in INCR is of pointer type,
659 canonicalize it into an expression handled by gimplify_omp_for()
660 and return it. DECL is the iteration variable. */
662 static tree
663 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
665 if (POINTER_TYPE_P (TREE_TYPE (decl))
666 && TREE_OPERAND (incr, 1))
668 tree t = fold_convert_loc (loc,
669 sizetype, TREE_OPERAND (incr, 1));
671 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
672 || TREE_CODE (incr) == PREDECREMENT_EXPR)
673 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
674 t = fold_build_pointer_plus (decl, t);
675 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
677 return incr;
680 /* Validate and generate OMP_FOR.
681 DECLV is a vector of iteration variables, for each collapsed loop.
683 ORIG_DECLV, if non-NULL, is a vector with the original iteration
684 variables (prior to any transformations, by say, C++ iterators).
686 INITV, CONDV and INCRV are vectors containing initialization
687 expressions, controlling predicates and increment expressions.
688 BODY is the body of the loop and PRE_BODY statements that go before
689 the loop. */
691 tree
692 c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
693 tree orig_declv, tree initv, tree condv, tree incrv,
694 tree body, tree pre_body, bool final_p)
696 location_t elocus;
697 bool fail = false;
698 int i;
/* All per-loop vectors must describe the same collapse depth.  */
700 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
701 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
702 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
/* Validate and canonicalize each collapsed loop's decl/init/cond/incr.
   Diagnostics set FAIL but keep going so all errors are reported.  */
703 for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
705 tree decl = TREE_VEC_ELT (declv, i);
706 tree init = TREE_VEC_ELT (initv, i);
707 tree cond = TREE_VEC_ELT (condv, i);
708 tree incr = TREE_VEC_ELT (incrv, i);
710 elocus = locus;
711 if (EXPR_HAS_LOCATION (init))
712 elocus = EXPR_LOCATION (init);
714 /* Validate the iteration variable. */
715 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
716 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE
718 error_at (elocus, "invalid type for iteration variable %qE", decl);
719 fail = true;
721 else if (TYPE_ATOMIC (TREE_TYPE (decl)))
723 error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
724 fail = true;
725 /* _Atomic iterator confuses stuff too much, so we risk ICE
726 trying to diagnose it further. */
727 continue;
730 /* In the case of "for (int i = 0...)", init will be a decl. It should
731 have a DECL_INITIAL that we can turn into an assignment. */
732 if (init == decl)
734 elocus = DECL_SOURCE_LOCATION (decl);
736 init = DECL_INITIAL (decl);
737 if (init == NULL)
739 error_at (elocus, "%qE is not initialized", decl);
740 init = integer_zero_node;
741 fail = true;
743 DECL_INITIAL (decl) = NULL_TREE;
745 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
746 /* FIXME diagnostics: This should
747 be the location of the INIT. */
748 elocus,
749 init,
750 NULL_TREE);
752 if (init != error_mark_node)
754 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
755 gcc_assert (TREE_OPERAND (init, 0) == decl);
758 if (cond == NULL_TREE)
760 error_at (elocus, "missing controlling predicate");
761 fail = true;
763 else
765 bool cond_ok = false;
767 /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
768 evaluation of the vla VAR_DECL. We need to readd
769 them to the non-decl operand. See PR45784. */
770 while (TREE_CODE (cond) == COMPOUND_EXPR)
771 cond = TREE_OPERAND (cond, 1);
773 if (EXPR_HAS_LOCATION (cond))
774 elocus = EXPR_LOCATION (cond);
776 if (TREE_CODE (cond) == LT_EXPR
777 || TREE_CODE (cond) == LE_EXPR
778 || TREE_CODE (cond) == GT_EXPR
779 || TREE_CODE (cond) == GE_EXPR
780 || TREE_CODE (cond) == NE_EXPR
781 || TREE_CODE (cond) == EQ_EXPR)
783 tree op0 = TREE_OPERAND (cond, 0);
784 tree op1 = TREE_OPERAND (cond, 1);
786 /* 2.5.1. The comparison in the condition is computed in
787 the type of DECL, otherwise the behavior is undefined.
789 For example:
790 long n; int i;
791 i < n;
793 according to ISO will be evaluated as:
794 (long)i < n;
796 We want to force:
797 i < (int)n; */
798 if (TREE_CODE (op0) == NOP_EXPR
799 && decl == TREE_OPERAND (op0, 0))
801 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
802 TREE_OPERAND (cond, 1)
803 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
804 TREE_OPERAND (cond, 1));
806 else if (TREE_CODE (op1) == NOP_EXPR
807 && decl == TREE_OPERAND (op1, 0))
809 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
810 TREE_OPERAND (cond, 0)
811 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
812 TREE_OPERAND (cond, 0));
/* Canonicalize so DECL ends up as operand 0 of the comparison,
   swapping the comparison code when it started on the right.  */
815 if (decl == TREE_OPERAND (cond, 0))
816 cond_ok = true;
817 else if (decl == TREE_OPERAND (cond, 1))
819 TREE_SET_CODE (cond,
820 swap_tree_comparison (TREE_CODE (cond)));
821 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
822 TREE_OPERAND (cond, 0) = decl;
823 cond_ok = true;
/* != / == are only acceptable in restricted forms; != against the
   type's min/max bound is rewritten to an ordered comparison.  */
826 if (TREE_CODE (cond) == NE_EXPR
827 || TREE_CODE (cond) == EQ_EXPR)
829 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
831 if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
832 cond_ok = false;
834 else if (operand_equal_p (TREE_OPERAND (cond, 1),
835 TYPE_MIN_VALUE (TREE_TYPE (decl)),
837 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
838 ? GT_EXPR : LE_EXPR);
839 else if (operand_equal_p (TREE_OPERAND (cond, 1),
840 TYPE_MAX_VALUE (TREE_TYPE (decl)),
842 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
843 ? LT_EXPR : GE_EXPR);
844 else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
845 cond_ok = false;
/* If COMPOUND_EXPRs were peeled off above, reattach their side
   effects onto the non-decl operand of the canonicalized COND.  */
848 if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
850 tree ce = NULL_TREE, *pce = &ce;
851 tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
852 for (tree c = TREE_VEC_ELT (condv, i); c != cond;
853 c = TREE_OPERAND (c, 1))
855 *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
856 TREE_OPERAND (cond, 1));
857 pce = &TREE_OPERAND (*pce, 1);
859 TREE_OPERAND (cond, 1) = ce;
860 TREE_VEC_ELT (condv, i) = cond;
864 if (!cond_ok)
866 error_at (elocus, "invalid controlling predicate");
867 fail = true;
871 if (incr == NULL_TREE)
873 error_at (elocus, "missing increment expression");
874 fail = true;
876 else
878 bool incr_ok = false;
880 if (EXPR_HAS_LOCATION (incr))
881 elocus = EXPR_LOCATION (incr);
883 /* Check all the valid increment expressions: v++, v--, ++v, --v,
884 v = v + incr, v = incr + v and v = v - incr. */
885 switch (TREE_CODE (incr))
887 case POSTINCREMENT_EXPR:
888 case PREINCREMENT_EXPR:
889 case POSTDECREMENT_EXPR:
890 case PREDECREMENT_EXPR:
891 if (TREE_OPERAND (incr, 0) != decl)
892 break;
894 incr_ok = true;
895 if (!fail
896 && TREE_CODE (cond) == NE_EXPR
897 && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
898 && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
899 && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
900 != INTEGER_CST))
902 /* For pointer to VLA, transform != into < or >
903 depending on whether incr is increment or decrement. */
904 if (TREE_CODE (incr) == PREINCREMENT_EXPR
905 || TREE_CODE (incr) == POSTINCREMENT_EXPR)
906 TREE_SET_CODE (cond, LT_EXPR);
907 else
908 TREE_SET_CODE (cond, GT_EXPR);
910 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
911 break;
913 case COMPOUND_EXPR:
914 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
915 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
916 break;
917 incr = TREE_OPERAND (incr, 1);
918 /* FALLTHRU */
919 case MODIFY_EXPR:
920 if (TREE_OPERAND (incr, 0) != decl)
921 break;
922 if (TREE_OPERAND (incr, 1) == decl)
923 break;
924 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
925 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
926 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
927 incr_ok = true;
928 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
929 || (TREE_CODE (TREE_OPERAND (incr, 1))
930 == POINTER_PLUS_EXPR))
931 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
932 incr_ok = true;
933 else
935 tree t = check_omp_for_incr_expr (elocus,
936 TREE_OPERAND (incr, 1),
937 decl);
938 if (t != error_mark_node)
940 incr_ok = true;
941 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
942 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
/* With a != condition the step must be a compile-time 1 or -1 (or
   exactly +/- one element for pointers); pointer-to-VLA steps turn
   the != into an ordered comparison instead.  */
945 if (!fail
946 && incr_ok
947 && TREE_CODE (cond) == NE_EXPR)
949 tree i = TREE_OPERAND (incr, 1);
950 i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
951 i = c_fully_fold (i, false, NULL);
952 if (!final_p
953 && TREE_CODE (i) != INTEGER_CST)
955 else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
957 tree unit
958 = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
959 if (unit)
961 enum tree_code ccode = GT_EXPR;
962 unit = c_fully_fold (unit, false, NULL);
963 i = fold_convert (TREE_TYPE (unit), i);
964 if (operand_equal_p (unit, i, 0))
965 ccode = LT_EXPR;
966 if (ccode == GT_EXPR)
968 i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
969 if (i == NULL_TREE
970 || !operand_equal_p (unit, i, 0))
972 error_at (elocus,
973 "increment is not constant 1 or "
974 "-1 for != condition");
975 fail = true;
978 if (TREE_CODE (unit) != INTEGER_CST)
979 /* For pointer to VLA, transform != into < or >
980 depending on whether the pointer is
981 incremented or decremented in each
982 iteration. */
983 TREE_SET_CODE (cond, ccode);
986 else
988 if (!integer_onep (i) && !integer_minus_onep (i))
990 error_at (elocus,
991 "increment is not constant 1 or -1 for"
992 " != condition");
993 fail = true;
997 break;
999 default:
1000 break;
1002 if (!incr_ok)
1004 error_at (elocus, "invalid increment expression");
1005 fail = true;
/* Store back the canonicalized init/incr for this loop level.  */
1009 TREE_VEC_ELT (initv, i) = init;
1010 TREE_VEC_ELT (incrv, i) = incr;
1013 if (fail)
1014 return NULL;
1015 else
1017 tree t = make_node (code);
1019 TREE_TYPE (t) = void_type_node;
1020 OMP_FOR_INIT (t) = initv;
1021 OMP_FOR_COND (t) = condv;
1022 OMP_FOR_INCR (t) = incrv;
1023 OMP_FOR_BODY (t) = body;
1024 OMP_FOR_PRE_BODY (t) = pre_body;
1025 OMP_FOR_ORIG_DECLS (t) = orig_declv;
1027 SET_EXPR_LOCATION (t, locus);
1028 return t;
1032 /* Type for passing data in between c_omp_check_loop_iv and
1033 c_omp_check_loop_iv_r. */
1035 struct c_omp_check_loop_iv_data
1037 tree declv;		/* TREE_VEC of the associated loops' IVs.  */
1038 bool fail;		/* Set when a diagnostic has been emitted.  */
1039 location_t stmt_loc;	/* Location of the OMP_FOR statement.  */
1040 location_t expr_loc;	/* Location of the expression being walked.  */
1041 int kind;		/* 0 = init, 1 = cond, 2 = incr expression.  */
1042 walk_tree_lh lh;	/* Language hook passed through to walk_tree_1.  */
1043 hash_set<tree> *ppset;	/* Visited-node set shared across walks.  */
1046 /* Helper function called via walk_tree, to diagnose uses
1047 of associated loop IVs inside of lb, b and incr expressions
1048 of OpenMP loops. */
1050 static tree
1051 c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
1053 struct c_omp_check_loop_iv_data *d
1054 = (struct c_omp_check_loop_iv_data *) data;
/* A DECL matching any associated-loop IV (directly, as a TREE_LIST
   purpose, or inside the TREE_VEC chained off a TREE_LIST entry) is an
   invalid reference; report it according to d->kind.  */
1055 if (DECL_P (*tp))
1057 int i;
1058 for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
1059 if (*tp == TREE_VEC_ELT (d->declv, i)
1060 || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
1061 && *tp == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i)))
1062 || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
1063 && TREE_CHAIN (TREE_VEC_ELT (d->declv, i))
1064 && (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i)))
1065 == TREE_VEC)
1066 && *tp == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv,
1067 i)), 2)))
/* Prefer the expression's own location; fall back to the statement's.  */
1069 location_t loc = d->expr_loc;
1070 if (loc == UNKNOWN_LOCATION)
1071 loc = d->stmt_loc;
1072 switch (d->kind)
1074 case 0:
1075 error_at (loc, "initializer expression refers to "
1076 "iteration variable %qD", *tp);
1077 break;
1078 case 1:
1079 error_at (loc, "condition expression refers to "
1080 "iteration variable %qD", *tp);
1081 break;
1082 case 2:
1083 error_at (loc, "increment expression refers to "
1084 "iteration variable %qD", *tp);
1085 break;
1087 d->fail = true;
1090 /* Don't walk dtors added by C++ wrap_cleanups_r. */
1091 else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
1092 && TRY_CATCH_IS_CLEANUP (*tp))
1094 *walk_subtrees = 0;
1095 return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
1096 d->ppset, d->lh);
1099 return NULL_TREE;
1102 /* Diagnose invalid references to loop iterators in lb, b and incr
1103 expressions. */
1105 bool
1106 c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
1108 hash_set<tree> pset;
1109 struct c_omp_check_loop_iv_data data;
1110 int i;
1112 data.declv = declv;
1113 data.fail = false;
1114 data.stmt_loc = EXPR_LOCATION (stmt);
1115 data.lh = lh;
1116 data.ppset = &pset;
/* Walk the init, cond and incr expressions of each associated loop,
   diagnosing any reference to an iteration variable.  */
1117 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
1119 tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
1120 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
1121 tree decl = TREE_OPERAND (init, 0);
1122 tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
1123 gcc_assert (COMPARISON_CLASS_P (cond));
1124 gcc_assert (TREE_OPERAND (cond, 0) == decl);
1125 tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
1126 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
1127 data.kind = 0;
1128 walk_tree_1 (&TREE_OPERAND (init, 1),
1129 c_omp_check_loop_iv_r, &data, &pset, lh);
1130 /* Don't warn for C++ random access iterators here, the
1131 expression then involves the subtraction and always refers
1132 to the original value. The C++ FE needs to warn on those
1133 earlier. */
1134 if (decl == TREE_VEC_ELT (declv, i)
1135 || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST
1136 && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i))))
1138 data.expr_loc = EXPR_LOCATION (cond);
1139 data.kind = 1;
1140 walk_tree_1 (&TREE_OPERAND (cond, 1),
1141 c_omp_check_loop_iv_r, &data, &pset, lh);
/* For a canonicalized MODIFY_EXPR increment, walk only the non-decl
   operand of the step expression.  */
1143 if (TREE_CODE (incr) == MODIFY_EXPR)
1145 gcc_assert (TREE_OPERAND (incr, 0) == decl);
1146 incr = TREE_OPERAND (incr, 1);
1147 data.kind = 2;
1148 if (TREE_CODE (incr) == PLUS_EXPR
1149 && TREE_OPERAND (incr, 1) == decl)
1151 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
1152 walk_tree_1 (&TREE_OPERAND (incr, 0),
1153 c_omp_check_loop_iv_r, &data, &pset, lh);
1155 else
1157 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
1158 walk_tree_1 (&TREE_OPERAND (incr, 1),
1159 c_omp_check_loop_iv_r, &data, &pset, lh);
/* True on success, i.e. no invalid IV reference was found.  */
1163 return !data.fail;
1166 /* Similar, but allows to check the init or cond expressions individually. */
1168 bool
1169 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
1170 tree init, tree cond, walk_tree_lh lh)
1172 hash_set<tree> pset;
1173 struct c_omp_check_loop_iv_data data;
1175 data.declv = declv;
1176 data.fail = false;
1177 data.stmt_loc = stmt_loc;
1178 data.lh = lh;
1179 data.ppset = &pset;
1180 if (init)
1182 data.expr_loc = EXPR_LOCATION (init);
1183 data.kind = 0;
1184 walk_tree_1 (&init,
1185 c_omp_check_loop_iv_r, &data, &pset, lh);
1187 if (cond)
1189 gcc_assert (COMPARISON_CLASS_P (cond));
1190 data.expr_loc = EXPR_LOCATION (init);
1191 data.kind = 1;
1192 if (TREE_OPERAND (cond, 0) == decl)
1193 walk_tree_1 (&TREE_OPERAND (cond, 1),
1194 c_omp_check_loop_iv_r, &data, &pset, lh);
1195 else
1196 walk_tree_1 (&TREE_OPERAND (cond, 0),
1197 c_omp_check_loop_iv_r, &data, &pset, lh);
1199 return !data.fail;
1202 /* This function splits clauses for OpenACC combined loop
1203 constructs. OpenACC combined loop constructs are:
1204 #pragma acc kernels loop
1205 #pragma acc parallel loop */
1207 tree
1208 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
1209 bool is_parallel)
1211 tree next, loop_clauses, nc;
1213 loop_clauses = *not_loop_clauses = NULL_TREE;
1214 for (; clauses ; clauses = next)
1216 next = OMP_CLAUSE_CHAIN (clauses);
1218 switch (OMP_CLAUSE_CODE (clauses))
1220 /* Loop clauses. */
1221 case OMP_CLAUSE_COLLAPSE:
1222 case OMP_CLAUSE_TILE:
1223 case OMP_CLAUSE_GANG:
1224 case OMP_CLAUSE_WORKER:
1225 case OMP_CLAUSE_VECTOR:
1226 case OMP_CLAUSE_AUTO:
1227 case OMP_CLAUSE_SEQ:
1228 case OMP_CLAUSE_INDEPENDENT:
1229 case OMP_CLAUSE_PRIVATE:
1230 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1231 loop_clauses = clauses;
1232 break;
1234 /* Reductions must be duplicated on both constructs. */
1235 case OMP_CLAUSE_REDUCTION:
1236 if (is_parallel)
1238 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1239 OMP_CLAUSE_REDUCTION);
1240 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
1241 OMP_CLAUSE_REDUCTION_CODE (nc)
1242 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1243 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
1244 *not_loop_clauses = nc;
1247 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1248 loop_clauses = clauses;
1249 break;
1251 /* Parallel/kernels clauses. */
1252 default:
1253 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
1254 *not_loop_clauses = clauses;
1255 break;
1259 return loop_clauses;
1262 /* This function attempts to split or duplicate clauses for OpenMP
1263 combined/composite constructs. Right now there are 26 different
1264 constructs. CODE is the innermost construct in the combined construct,
1265 and MASK allows to determine which constructs are combined together,
1266 as every construct has at least one clause that no other construct
1267 has (except for OMP_SECTIONS, but that can be only combined with parallel,
1268 and OMP_MASTER, which doesn't have any clauses at all).
1269 OpenMP combined/composite constructs are:
1270 #pragma omp distribute parallel for
1271 #pragma omp distribute parallel for simd
1272 #pragma omp distribute simd
1273 #pragma omp for simd
1274 #pragma omp master taskloop
1275 #pragma omp master taskloop simd
1276 #pragma omp parallel for
1277 #pragma omp parallel for simd
1278 #pragma omp parallel master
1279 #pragma omp parallel master taskloop
1280 #pragma omp parallel master taskloop simd
1281 #pragma omp parallel sections
1282 #pragma omp target parallel
1283 #pragma omp target parallel for
1284 #pragma omp target parallel for simd
1285 #pragma omp target teams
1286 #pragma omp target teams distribute
1287 #pragma omp target teams distribute parallel for
1288 #pragma omp target teams distribute parallel for simd
1289 #pragma omp target teams distribute simd
1290 #pragma omp target simd
1291 #pragma omp taskloop simd
1292 #pragma omp teams distribute
1293 #pragma omp teams distribute parallel for
1294 #pragma omp teams distribute parallel for simd
1295 #pragma omp teams distribute simd */
1297 void
1298 c_omp_split_clauses (location_t loc, enum tree_code code,
1299 omp_clause_mask mask, tree clauses, tree *cclauses)
1301 tree next, c;
1302 enum c_omp_clause_split s;
1303 int i;
1305 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
1306 cclauses[i] = NULL;
1307 /* Add implicit nowait clause on
1308 #pragma omp parallel {for,for simd,sections}. */
1309 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1310 switch (code)
1312 case OMP_FOR:
1313 case OMP_SIMD:
1314 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1315 cclauses[C_OMP_CLAUSE_SPLIT_FOR]
1316 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1317 break;
1318 case OMP_SECTIONS:
1319 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
1320 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1321 break;
1322 default:
1323 break;
1326 for (; clauses ; clauses = next)
1328 next = OMP_CLAUSE_CHAIN (clauses);
1330 switch (OMP_CLAUSE_CODE (clauses))
1332 /* First the clauses that are unique to some constructs. */
1333 case OMP_CLAUSE_DEVICE:
1334 case OMP_CLAUSE_MAP:
1335 case OMP_CLAUSE_IS_DEVICE_PTR:
1336 case OMP_CLAUSE_DEFAULTMAP:
1337 case OMP_CLAUSE_DEPEND:
1338 s = C_OMP_CLAUSE_SPLIT_TARGET;
1339 break;
1340 case OMP_CLAUSE_NUM_TEAMS:
1341 case OMP_CLAUSE_THREAD_LIMIT:
1342 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1343 break;
1344 case OMP_CLAUSE_DIST_SCHEDULE:
1345 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1346 break;
1347 case OMP_CLAUSE_COPYIN:
1348 case OMP_CLAUSE_NUM_THREADS:
1349 case OMP_CLAUSE_PROC_BIND:
1350 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1351 break;
1352 case OMP_CLAUSE_ORDERED:
1353 s = C_OMP_CLAUSE_SPLIT_FOR;
1354 break;
1355 case OMP_CLAUSE_SCHEDULE:
1356 s = C_OMP_CLAUSE_SPLIT_FOR;
1357 if (code != OMP_SIMD)
1358 OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
1359 break;
1360 case OMP_CLAUSE_SAFELEN:
1361 case OMP_CLAUSE_SIMDLEN:
1362 case OMP_CLAUSE_ALIGNED:
1363 case OMP_CLAUSE_NONTEMPORAL:
1364 s = C_OMP_CLAUSE_SPLIT_SIMD;
1365 break;
1366 case OMP_CLAUSE_GRAINSIZE:
1367 case OMP_CLAUSE_NUM_TASKS:
1368 case OMP_CLAUSE_FINAL:
1369 case OMP_CLAUSE_UNTIED:
1370 case OMP_CLAUSE_MERGEABLE:
1371 case OMP_CLAUSE_NOGROUP:
1372 case OMP_CLAUSE_PRIORITY:
1373 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1374 break;
1375 /* Duplicate this to all of taskloop, distribute, for and simd. */
1376 case OMP_CLAUSE_COLLAPSE:
1377 if (code == OMP_SIMD)
1379 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
1380 | (OMP_CLAUSE_MASK_1
1381 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
1382 | (OMP_CLAUSE_MASK_1
1383 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
1385 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1386 OMP_CLAUSE_COLLAPSE);
1387 OMP_CLAUSE_COLLAPSE_EXPR (c)
1388 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1389 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1390 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1392 else
1394 /* This must be #pragma omp target simd */
1395 s = C_OMP_CLAUSE_SPLIT_SIMD;
1396 break;
1399 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1401 if ((mask & (OMP_CLAUSE_MASK_1
1402 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1404 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1405 OMP_CLAUSE_COLLAPSE);
1406 OMP_CLAUSE_COLLAPSE_EXPR (c)
1407 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1408 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
1409 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
1410 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1412 else
1413 s = C_OMP_CLAUSE_SPLIT_FOR;
1415 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1416 != 0)
1417 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1418 else
1419 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1420 break;
1421 /* Private clause is supported on all constructs but master,
1422 it is enough to put it on the innermost one other than master. For
1423 #pragma omp {for,sections} put it on parallel though,
1424 as that's what we did for OpenMP 3.1. */
1425 case OMP_CLAUSE_PRIVATE:
1426 switch (code)
1428 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
1429 case OMP_FOR: case OMP_SECTIONS:
1430 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1431 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
1432 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
1433 case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1434 case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
1435 default: gcc_unreachable ();
1437 break;
1438 /* Firstprivate clause is supported on all constructs but
1439 simd and master. Put it on the outermost of those and duplicate on
1440 teams and parallel. */
1441 case OMP_CLAUSE_FIRSTPRIVATE:
1442 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1443 != 0)
1445 if (code == OMP_SIMD
1446 && (mask & ((OMP_CLAUSE_MASK_1
1447 << PRAGMA_OMP_CLAUSE_NUM_THREADS)
1448 | (OMP_CLAUSE_MASK_1
1449 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
1451 /* This must be #pragma omp target simd. */
1452 s = C_OMP_CLAUSE_SPLIT_TARGET;
1453 break;
1455 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1456 OMP_CLAUSE_FIRSTPRIVATE);
1457 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1458 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
1459 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
1461 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1462 != 0)
1464 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
1465 | (OMP_CLAUSE_MASK_1
1466 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
1468 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1469 OMP_CLAUSE_FIRSTPRIVATE);
1470 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1471 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1472 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1473 if ((mask & (OMP_CLAUSE_MASK_1
1474 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
1475 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1476 else
1477 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1479 else if ((mask & (OMP_CLAUSE_MASK_1
1480 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1481 /* This must be
1482 #pragma omp parallel master taskloop{, simd}. */
1483 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1484 else
1485 /* This must be
1486 #pragma omp parallel{, for{, simd}, sections}
1488 #pragma omp target parallel. */
1489 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1491 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1492 != 0)
1494 /* This must be one of
1495 #pragma omp {,target }teams distribute
1496 #pragma omp target teams
1497 #pragma omp {,target }teams distribute simd. */
1498 gcc_assert (code == OMP_DISTRIBUTE
1499 || code == OMP_TEAMS
1500 || code == OMP_SIMD);
1501 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1503 else if ((mask & (OMP_CLAUSE_MASK_1
1504 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1506 /* This must be #pragma omp distribute simd. */
1507 gcc_assert (code == OMP_SIMD);
1508 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1510 else if ((mask & (OMP_CLAUSE_MASK_1
1511 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1513 /* This must be #pragma omp {,{,parallel }master }taskloop simd
1515 #pragma omp {,parallel }master taskloop. */
1516 gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
1517 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1519 else
1521 /* This must be #pragma omp for simd. */
1522 gcc_assert (code == OMP_SIMD);
1523 s = C_OMP_CLAUSE_SPLIT_FOR;
1525 break;
1526 /* Lastprivate is allowed on distribute, for, sections, taskloop and
1527 simd. In parallel {for{, simd},sections} we actually want to put
1528 it on parallel rather than for or sections. */
1529 case OMP_CLAUSE_LASTPRIVATE:
1530 if (code == OMP_DISTRIBUTE)
1532 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1533 break;
1535 if ((mask & (OMP_CLAUSE_MASK_1
1536 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1538 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1539 OMP_CLAUSE_LASTPRIVATE);
1540 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1541 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
1542 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1543 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1544 cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
1546 if (code == OMP_FOR || code == OMP_SECTIONS)
1548 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1549 != 0)
1550 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1551 else
1552 s = C_OMP_CLAUSE_SPLIT_FOR;
1553 break;
1555 if (code == OMP_TASKLOOP)
1557 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1558 break;
1560 gcc_assert (code == OMP_SIMD);
1561 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1563 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1564 OMP_CLAUSE_LASTPRIVATE);
1565 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1566 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1567 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1568 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1569 != 0)
1570 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1571 else
1572 s = C_OMP_CLAUSE_SPLIT_FOR;
1573 OMP_CLAUSE_CHAIN (c) = cclauses[s];
1574 cclauses[s] = c;
1576 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1578 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1579 OMP_CLAUSE_LASTPRIVATE);
1580 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1581 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1582 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1583 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
1584 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
1586 s = C_OMP_CLAUSE_SPLIT_SIMD;
1587 break;
1588 /* Shared and default clauses are allowed on parallel, teams and
1589 taskloop. */
1590 case OMP_CLAUSE_SHARED:
1591 case OMP_CLAUSE_DEFAULT:
1592 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1593 != 0)
1595 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1596 != 0)
1598 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1599 OMP_CLAUSE_CODE (clauses));
1600 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1601 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1602 else
1603 OMP_CLAUSE_DEFAULT_KIND (c)
1604 = OMP_CLAUSE_DEFAULT_KIND (clauses);
1605 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1606 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1608 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1609 break;
1611 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1612 != 0)
1614 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1615 == 0)
1617 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1618 break;
1620 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1621 OMP_CLAUSE_CODE (clauses));
1622 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1623 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1624 else
1625 OMP_CLAUSE_DEFAULT_KIND (c)
1626 = OMP_CLAUSE_DEFAULT_KIND (clauses);
1627 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
1628 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
1630 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1631 break;
1632 /* Reduction is allowed on simd, for, parallel, sections, taskloop
1633 and teams. Duplicate it on all of them, but omit on for or
1634 sections if parallel is present. If taskloop is combined with
1635 parallel, omit it on parallel. */
1636 case OMP_CLAUSE_REDUCTION:
1637 if (OMP_CLAUSE_REDUCTION_TASK (clauses))
1639 if (code == OMP_SIMD /* || code == OMP_LOOP */)
1641 error_at (OMP_CLAUSE_LOCATION (clauses),
1642 "invalid %<task%> reduction modifier on construct "
1643 "combined with %<simd%>" /* or %<loop%> */);
1644 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
1646 else if (code != OMP_SECTIONS
1647 && (mask & (OMP_CLAUSE_MASK_1
1648 << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0
1649 && (mask & (OMP_CLAUSE_MASK_1
1650 << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
1652 error_at (OMP_CLAUSE_LOCATION (clauses),
1653 "invalid %<task%> reduction modifier on construct "
1654 "not combined with %<parallel%>, %<for%> or "
1655 "%<sections%>");
1656 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
1659 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1661 if (code == OMP_SIMD)
1663 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1664 OMP_CLAUSE_REDUCTION);
1665 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1666 OMP_CLAUSE_REDUCTION_CODE (c)
1667 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1668 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1669 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1670 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1671 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1672 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1673 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1675 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1676 != 0)
1678 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1679 OMP_CLAUSE_REDUCTION);
1680 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1681 OMP_CLAUSE_REDUCTION_CODE (c)
1682 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1683 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1684 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1685 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1686 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1687 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
1688 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
1689 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1691 else if ((mask & (OMP_CLAUSE_MASK_1
1692 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1693 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1694 else
1695 s = C_OMP_CLAUSE_SPLIT_FOR;
1697 else if (code == OMP_SECTIONS
1698 || code == OMP_PARALLEL
1699 || code == OMP_MASTER)
1700 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1701 else if (code == OMP_TASKLOOP)
1702 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1703 else if (code == OMP_SIMD)
1705 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1706 != 0)
1708 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1709 OMP_CLAUSE_REDUCTION);
1710 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1711 OMP_CLAUSE_REDUCTION_CODE (c)
1712 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1713 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1714 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1715 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1716 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1717 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
1718 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
1720 s = C_OMP_CLAUSE_SPLIT_SIMD;
1722 else
1723 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1724 break;
1725 case OMP_CLAUSE_IN_REDUCTION:
1726 /* in_reduction on taskloop simd becomes reduction on the simd
1727 and keeps being in_reduction on taskloop. */
1728 if (code == OMP_SIMD)
1730 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1731 OMP_CLAUSE_REDUCTION);
1732 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1733 OMP_CLAUSE_REDUCTION_CODE (c)
1734 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1735 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1736 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1737 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1738 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1739 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1740 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1742 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1743 break;
1744 case OMP_CLAUSE_IF:
1745 if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
1747 s = C_OMP_CLAUSE_SPLIT_COUNT;
1748 switch (OMP_CLAUSE_IF_MODIFIER (clauses))
1750 case OMP_PARALLEL:
1751 if ((mask & (OMP_CLAUSE_MASK_1
1752 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1753 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1754 break;
1755 case OMP_SIMD:
1756 if (code == OMP_SIMD)
1757 s = C_OMP_CLAUSE_SPLIT_SIMD;
1758 break;
1759 case OMP_TASKLOOP:
1760 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1761 != 0)
1762 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1763 break;
1764 case OMP_TARGET:
1765 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1766 != 0)
1767 s = C_OMP_CLAUSE_SPLIT_TARGET;
1768 break;
1769 default:
1770 break;
1772 if (s != C_OMP_CLAUSE_SPLIT_COUNT)
1773 break;
1774 /* Error-recovery here, invalid if-modifier specified, add the
1775 clause to just one construct. */
1776 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
1777 s = C_OMP_CLAUSE_SPLIT_TARGET;
1778 else if ((mask & (OMP_CLAUSE_MASK_1
1779 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1780 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1781 else if ((mask & (OMP_CLAUSE_MASK_1
1782 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1783 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1784 else if (code == OMP_SIMD)
1785 s = C_OMP_CLAUSE_SPLIT_SIMD;
1786 else
1787 gcc_unreachable ();
1788 break;
1790 /* Otherwise, duplicate if clause to all constructs. */
1791 if (code == OMP_SIMD)
1793 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
1794 | (OMP_CLAUSE_MASK_1
1795 << PRAGMA_OMP_CLAUSE_NUM_THREADS)
1796 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
1797 != 0)
1799 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1800 OMP_CLAUSE_IF);
1801 OMP_CLAUSE_IF_MODIFIER (c)
1802 = OMP_CLAUSE_IF_MODIFIER (clauses);
1803 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
1804 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1805 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1807 else
1809 s = C_OMP_CLAUSE_SPLIT_SIMD;
1810 break;
1813 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1814 != 0)
1816 if ((mask & (OMP_CLAUSE_MASK_1
1817 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1819 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1820 OMP_CLAUSE_IF);
1821 OMP_CLAUSE_IF_MODIFIER (c)
1822 = OMP_CLAUSE_IF_MODIFIER (clauses);
1823 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
1824 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
1825 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
1826 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1828 else
1829 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1831 else if ((mask & (OMP_CLAUSE_MASK_1
1832 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1834 if ((mask & (OMP_CLAUSE_MASK_1
1835 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
1837 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1838 OMP_CLAUSE_IF);
1839 OMP_CLAUSE_IF_MODIFIER (c)
1840 = OMP_CLAUSE_IF_MODIFIER (clauses);
1841 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
1842 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
1843 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
1844 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1846 else
1847 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1849 else
1850 s = C_OMP_CLAUSE_SPLIT_TARGET;
1851 break;
1852 case OMP_CLAUSE_LINEAR:
1853 /* Linear clause is allowed on simd and for. Put it on the
1854 innermost construct. */
1855 if (code == OMP_SIMD)
1856 s = C_OMP_CLAUSE_SPLIT_SIMD;
1857 else
1858 s = C_OMP_CLAUSE_SPLIT_FOR;
1859 break;
1860 case OMP_CLAUSE_NOWAIT:
1861 /* Nowait clause is allowed on target, for and sections, but
1862 is not allowed on parallel for or parallel sections. Therefore,
1863 put it on target construct if present, because that can only
1864 be combined with parallel for{, simd} and not with for{, simd},
1865 otherwise to the worksharing construct. */
1866 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1867 != 0)
1868 s = C_OMP_CLAUSE_SPLIT_TARGET;
1869 else
1870 s = C_OMP_CLAUSE_SPLIT_FOR;
1871 break;
1872 default:
1873 gcc_unreachable ();
1875 OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
1876 cclauses[s] = clauses;
1879 if (!flag_checking)
1880 return;
1882 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
1883 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
1884 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
1885 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
1886 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
1887 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
1888 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
1889 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
1890 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
1891 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
1892 && code != OMP_SECTIONS)
1893 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
1894 if (code != OMP_SIMD)
1895 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
1899 /* qsort callback to compare #pragma omp declare simd clauses. */
1901 static int
1902 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
1904 tree a = *(const tree *) p;
1905 tree b = *(const tree *) q;
1906 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
1908 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
1909 return -1;
1910 return 1;
1912 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
1913 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
1914 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
1916 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
1917 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
1918 if (c < d)
1919 return 1;
1920 if (c > d)
1921 return -1;
1923 return 0;
1926 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1927 CLAUSES on FNDECL into argument indexes and sort them. */
1929 tree
1930 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1932 tree c;
1933 vec<tree> clvec = vNULL;
1935 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1937 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1938 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1939 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1941 tree decl = OMP_CLAUSE_DECL (c);
1942 tree arg;
1943 int idx;
1944 for (arg = parms, idx = 0; arg;
1945 arg = TREE_CHAIN (arg), idx++)
1946 if (arg == decl)
1947 break;
1948 if (arg == NULL_TREE)
1950 error_at (OMP_CLAUSE_LOCATION (c),
1951 "%qD is not an function argument", decl);
1952 continue;
1954 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1955 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1956 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1958 decl = OMP_CLAUSE_LINEAR_STEP (c);
1959 for (arg = parms, idx = 0; arg;
1960 arg = TREE_CHAIN (arg), idx++)
1961 if (arg == decl)
1962 break;
1963 if (arg == NULL_TREE)
1965 error_at (OMP_CLAUSE_LOCATION (c),
1966 "%qD is not an function argument", decl);
1967 continue;
1969 OMP_CLAUSE_LINEAR_STEP (c)
1970 = build_int_cst (integer_type_node, idx);
1973 clvec.safe_push (c);
1975 if (!clvec.is_empty ())
1977 unsigned int len = clvec.length (), i;
1978 clvec.qsort (c_omp_declare_simd_clause_cmp);
1979 clauses = clvec[0];
1980 for (i = 0; i < len; i++)
1981 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1983 else
1984 clauses = NULL_TREE;
1985 clvec.release ();
1986 return clauses;
1989 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1991 void
1992 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1994 tree c;
1996 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1997 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1998 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1999 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
2001 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
2002 tree arg;
2003 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
2004 arg = TREE_CHAIN (arg), i++)
2005 if (i == idx)
2006 break;
2007 gcc_assert (arg);
2008 OMP_CLAUSE_DECL (c) = arg;
2009 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2010 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
2012 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
2013 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
2014 arg = TREE_CHAIN (arg), i++)
2015 if (i == idx)
2016 break;
2017 gcc_assert (arg);
2018 OMP_CLAUSE_LINEAR_STEP (c) = arg;
2023 /* True if OpenMP sharing attribute of DECL is predetermined. */
2025 enum omp_clause_default_kind
2026 c_omp_predetermined_sharing (tree decl)
2028 /* Predetermine artificial variables holding integral values, those
2029 are usually result of gimplify_one_sizepos or SAVE_EXPR
2030 gimplification. */
2031 if (VAR_P (decl)
2032 && DECL_ARTIFICIAL (decl)
2033 && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
2034 return OMP_CLAUSE_DEFAULT_SHARED;
2036 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;