/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
		 Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
29 #include "gimple-expr.h"
31 #include "omp-general.h"
32 #include "gomp-constants.h"
/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);

  args->quick_push (t);
  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
	args->quick_push (build_int_cst (integer_type_node,
			TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}
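/* Editorial sketch, not part of the original sources: for a directive
   such as

     #pragma acc wait (1, 2) async (3)

   the BUILT_IN_GOACC_WAIT call built above receives the arguments
   (3, 2, 1, 2): the async queue id (GOMP_ASYNC_SYNC when no async
   clause is present), the count of wait operands, then the wait
   queue ids themselves.  */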
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}
/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}
/* Complete a #pragma omp critical construct.  STMT is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  OMP_CRITICAL_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
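/* Editorial example: "#pragma omp critical (lock1)" produces an
   OMP_CRITICAL node whose NAME operand is the identifier "lock1"; an
   unnamed critical leaves NAME as NULL_TREE, and per the OpenMP
   semantics all unnamed critical regions then exclude one another.  */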
/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  if (!flag_openmp	/* flag_openmp_simd  */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
	  || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst,
		     bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
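/* Editorial sketch of the lowering performed above, assuming a plain
   scalar "int x" and an lvalue "v":

     #pragma omp atomic update     x += e;
       -> OMP_ATOMIC (&x, x + e)
     #pragma omp atomic read       v = x;
       -> v = OMP_ATOMIC_READ (&x)
     #pragma omp atomic capture    v = x += e;
       -> v = OMP_ATOMIC_CAPTURE_NEW (&x, x + e)

   The second operand is the new value built via the usual
   build_modify_expr machinery; gimplification later expands these
   nodes into the real atomic built-ins.  */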
/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
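/* Editorial example: for DECL "i" and EXP "i + 3", the recursion above
   reduces the "i" operand to 0 and rebuilds "0 + 3", so the caller can
   canonicalize the increment to "i = i + (0 + 3)".  An expression whose
   type is narrower than DECL's, or one that never mentions DECL, yields
   error_mark_node instead.  */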
/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
				 sizetype, TREE_OPERAND (incr, 1));

      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}
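/* Editorial example: for "char *p", the increment "p++" arrives as a
   POSTINCREMENT_EXPR whose second operand is the step; the code above
   rewrites it as "p = p + (sizetype) 1" using a POINTER_PLUS_EXPR
   inside a MODIFY_EXPR, negating the step first for p-- and --p.  */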
/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.

   ORIG_DECLV, if non-NULL, is a vector with the original iteration
   variables (prior to any transformations, by say, C++ iterators).

   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
		    cond_ok = false;
		}

	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}
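/* Editorial example of the canonicalization above: the loop

     for (i = 0; n > i; i += 2)

   is rewritten so that the iteration variable is always operand 0 of
   the condition ("i < n") and the increment becomes the canonical
   "i = i + 2" MODIFY_EXPR, before the OMP_FOR/OMP_SIMD/... node is
   assembled from the initv/condv/incrv vectors.  */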
/* Type for passing data in between c_omp_check_loop_iv and
   c_omp_check_loop_iv_r.  */

struct c_omp_check_loop_iv_data
{
  tree declv;
  bool fail;
  location_t stmt_loc;
  location_t expr_loc;
  int kind;
  walk_tree_lh lh;
  hash_set<tree> *ppset;
};

/* Helper function called via walk_tree, to diagnose uses
   of associated loop IVs inside of lb, b and incr expressions
   of OpenMP loops.  */

static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      int i;
      for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
	if (*tp == TREE_VEC_ELT (d->declv, i))
	  {
	    location_t loc = d->expr_loc;
	    if (loc == UNKNOWN_LOCATION)
	      loc = d->stmt_loc;
	    switch (d->kind)
	      {
	      case 0:
		error_at (loc, "initializer expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 1:
		error_at (loc, "condition expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      case 2:
		error_at (loc, "increment expression refers to "
			       "iteration variable %qD", *tp);
		break;
	      }
	    d->fail = true;
	  }
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  d->ppset, d->lh);
    }

  return NULL_TREE;
}

/* Diagnose invalid references to loop iterators in lb, b and incr
   expressions.  */

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i))
	{
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, &pset, lh);
	    }
	}
    }
  return !data.fail;
}
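/* Editorial example of the diagnostics above: in

     #pragma omp for collapse(2)
     for (i = 0; i < n; i++)
       for (j = i; j < n; j++)
	 ;

   the initializer of "j" refers to the associated iteration variable
   "i", so the walk reports "initializer expression refers to iteration
   variable 'i'" and c_omp_check_loop_iv returns false.  */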
/* Similar, but allows to check the init or cond expressions individually.  */

bool
c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
			   tree init, tree cond, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = stmt_loc;
  data.lh = lh;
  data.ppset = &pset;
  if (init)
    {
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 0;
      walk_tree_1 (&init,
		   c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  if (cond)
    {
      gcc_assert (COMPARISON_CLASS_P (cond));
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 1;
      if (TREE_OPERAND (cond, 0) == decl)
	walk_tree_1 (&TREE_OPERAND (cond, 1),
		     c_omp_check_loop_iv_r, &data, &pset, lh);
      else
	walk_tree_1 (&TREE_OPERAND (cond, 0),
		     c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  return !data.fail;
}
/* This function splits clauses for OpenACC combined loop
   constructs.  OpenACC combined loop constructs are:
   #pragma acc kernels loop
   #pragma acc parallel loop  */

tree
c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
			   bool is_parallel)
{
  tree next, loop_clauses, nc;

  loop_clauses = *not_loop_clauses = NULL_TREE;
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	  /* Loop clauses.  */
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_PRIVATE:
	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
	  loop_clauses = clauses;
	  break;

	  /* Reductions must be duplicated on both constructs.  */
	case OMP_CLAUSE_REDUCTION:
	  if (is_parallel)
	    {
	      nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				     OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (nc)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
	      *not_loop_clauses = nc;
	    }

	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
	  loop_clauses = clauses;
	  break;

	  /* Parallel/kernels clauses.  */
	default:
	  OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
	  *not_loop_clauses = clauses;
	  break;
	}
    }

  return loop_clauses;
}
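/* Editorial example: for

     #pragma acc parallel loop num_gangs(8) collapse(2) reduction(+:s)

   the loop above files collapse(2) on the loop clause list, num_gangs(8)
   on the parallel (non-loop) list, and duplicates reduction(+:s) onto
   both when IS_PARALLEL is set.  */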
/* This function attempts to split or duplicate clauses for OpenMP
   combined/composite constructs.  Right now there are 21 different
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows to determine which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can be only combined with parallel).
   OpenMP combined/composite constructs are:
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp distribute simd
   #pragma omp for simd
   #pragma omp parallel for
   #pragma omp parallel for simd
   #pragma omp parallel sections
   #pragma omp target parallel
   #pragma omp target parallel for
   #pragma omp target parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd
   #pragma omp target teams distribute simd
   #pragma omp target simd
   #pragma omp taskloop simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp teams distribute simd  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_DEPEND:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd.  Put it on the outermost of those and duplicate on teams
	   and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      /* This must be #pragma omp taskloop simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on distribute, for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
		    {
		      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					    OMP_CLAUSE_IF);
		      OMP_CLAUSE_IF_MODIFIER (c)
			= OMP_CLAUSE_IF_MODIFIER (clauses);
		      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		      OMP_CLAUSE_CHAIN (c)
			= cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		    }
		  else
		    {
		      error_at (OMP_CLAUSE_LOCATION (clauses),
				"expected %<parallel%> or %<target%> %<if%> "
				"clause modifier");
		      continue;
		    }
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_NOWAIT:
	  /* Nowait clause is allowed on target, for and sections, but
	     is not allowed on parallel for or parallel sections.  Therefore,
	     put it on target construct if present, because that can only
	     be combined with parallel for{, simd} and not with for{, simd},
	     otherwise to the worksharing construct.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
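/* Editorial example: for

     #pragma omp parallel for simd num_threads(4) schedule(static) \
		 safelen(8) lastprivate(x)

   the loop above files num_threads on the parallel list, schedule on
   the for list, safelen on the simd list, and lastprivate(x) on both
   simd and (because schedule is in MASK) parallel, leaving each
   sub-construct with exactly the clauses it understands.  */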
/* qsort callback to compare #pragma omp declare simd clauses.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
	return -1;
      return 1;
    }
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
      int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
      if (c < d)
	return 1;
      if (c > d)
	return -1;
    }
  return 0;
}
/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree arg;
	  int idx;
	  for (arg = parms, idx = 0; arg;
	       arg = TREE_CHAIN (arg), idx++)
	    if (arg == decl)
	      break;
	  if (arg == NULL_TREE)
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	    {
	      decl = OMP_CLAUSE_LINEAR_STEP (c);
	      for (arg = parms, idx = 0; arg;
		   arg = TREE_CHAIN (arg), idx++)
		if (arg == decl)
		  break;
	      if (arg == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qD is not a function argument", decl);
		  continue;
		}
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (integer_type_node, idx);
	    }
	}
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  else
    clauses = NULL_TREE;
  clvec.release ();
  return clauses;
}
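/* Editorial example: for

     #pragma omp declare simd uniform(n) linear(p)
     void foo (int *p, int n);

   the uniform and linear clauses initially point at the PARM_DECLs for
   "n" and "p"; the function above replaces them with the argument
   indexes 1 and 0 and sorts the clause chain by clause code and
   index.  */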
/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
	tree arg;
	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
	     arg = TREE_CHAIN (arg), i++)
	  if (i == idx)
	    break;
	gcc_assert (arg);
	OMP_CLAUSE_DECL (c) = arg;
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	    && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	  {
	    idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
	    for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
		 arg = TREE_CHAIN (arg), i++)
	      if (i == idx)
		break;
	    gcc_assert (arg);
	    OMP_CLAUSE_LINEAR_STEP (c) = arg;
	  }
      }
}
/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
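/* Editorial example: given "const int limit = 100;" referenced inside a
   parallel region with default(none), TREE_READONLY (limit) holds, so
   the predetermined kind above is OMP_CLAUSE_DEFAULT_SHARED and no
   explicit data-sharing clause is required for it.  */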