1 /* Lowering pass for OMP directives. Converts OMP directives into explicit
2 calls to the runtime library (libgomp), data marshalling to implement data
3 sharing and copying clauses, offloading to accelerators, and more.
5 Contributed by Diego Novillo <dnovillo@redhat.com>
7 Copyright (C) 2005-2015 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "double-int.h"
39 #include "fold-const.h"
40 #include "stringpool.h"
41 #include "stor-layout.h"
44 #include "hard-reg-set.h"
46 #include "dominance.h"
49 #include "basic-block.h"
50 #include "tree-ssa-alias.h"
51 #include "internal-fn.h"
52 #include "gimple-fold.h"
53 #include "gimple-expr.h"
57 #include "gimple-iterator.h"
58 #include "gimplify-me.h"
59 #include "gimple-walk.h"
60 #include "tree-iterator.h"
61 #include "tree-inline.h"
62 #include "langhooks.h"
63 #include "diagnostic-core.h"
64 #include "gimple-ssa.h"
66 #include "plugin-api.h"
70 #include "tree-phinodes.h"
71 #include "ssa-iterators.h"
72 #include "tree-ssanames.h"
73 #include "tree-into-ssa.h"
76 #include "statistics.h"
78 #include "fixed-value.h"
79 #include "insn-config.h"
90 #include "tree-pass.h"
92 #include "splay-tree.h"
93 #include "insn-codes.h"
97 #include "common/common-target.h"
99 #include "gimple-low.h"
100 #include "tree-cfgcleanup.h"
101 #include "pretty-print.h"
102 #include "alloc-pool.h"
103 #include "symbol-summary.h"
104 #include "ipa-prop.h"
105 #include "tree-nested.h"
109 #include "lto-section-names.h"
110 #include "gomp-constants.h"
113 /* Lowering of OMP parallel and workshare constructs proceeds in two
114 phases. The first phase scans the function looking for OMP statements
115 and then for variables that must be replaced to satisfy data sharing
116 clauses. The second phase expands code for the constructs, as well as
117 re-gimplifying things when variables have been replaced with complex
120 Final code generation is done by pass_expand_omp. The flowgraph is
121 scanned for regions which are then moved to a new
122 function, to be invoked by the thread library, or offloaded. */
124 /* OMP region information. Every parallel and workshare
125 directive is enclosed between two markers, the OMP_* directive
126 and a corresponding OMP_RETURN statement. */
130 /* The enclosing region. */
131 struct omp_region
*outer
;
133 /* First child region. */
134 struct omp_region
*inner
;
136 /* Next peer region. */
137 struct omp_region
*next
;
139 /* Block containing the omp directive as its last stmt. */
142 /* Block containing the OMP_RETURN as its last stmt. */
145 /* Block containing the OMP_CONTINUE as its last stmt. */
148 /* If this is a combined parallel+workshare region, this is a list
149 of additional arguments needed by the combined parallel+workshare
151 vec
<tree
, va_gc
> *ws_args
;
153 /* The code for the omp directive of this region. */
154 enum gimple_code type
;
156 /* Schedule kind, only used for OMP_FOR type regions. */
157 enum omp_clause_schedule_kind sched_kind
;
159 /* True if this is a combined parallel+workshare region. */
160 bool is_combined_parallel
;
/* Levels of parallelism as defined by OpenACC.  Increasing numbers
   correspond to deeper loop nesting levels.  */
/* NOTE(review): MASK_GANG (value 1) was dropped from this copy; restored so
   the mask values form the expected bit set — confirm against upstream.  */
#define MASK_GANG 1
#define MASK_WORKER 2
#define MASK_VECTOR 4
169 /* Context structure. Used to store information about each parallel
170 directive in the code. */
172 typedef struct omp_context
174 /* This field must be at the beginning, as we do "inheritance": Some
175 callback functions for tree-inline.c (e.g., omp_copy_decl)
176 receive a copy_body_data pointer that is up-casted to an
177 omp_context pointer. */
180 /* The tree of contexts corresponding to the encountered constructs. */
181 struct omp_context
*outer
;
184 /* Map variables to fields in a structure that allows communication
185 between sending and receiving threads. */
186 splay_tree field_map
;
191 /* These are used just by task contexts, if task firstprivate fn is
192 needed. srecord_type is used to communicate from the thread
193 that encountered the task construct to task firstprivate fn,
194 record_type is allocated by GOMP_task, initialized by task firstprivate
195 fn and passed to the task body fn. */
196 splay_tree sfield_map
;
199 /* A chain of variables to add to the top-level block surrounding the
200 construct. In the case of a parallel, this is in the child function. */
203 /* A map of reduction pointer variables. For accelerators, each
204 reduction variable is replaced with an array. Each thread, in turn,
205 is assigned to a slot on that array. */
206 splay_tree reduction_map
;
208 /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
209 barriers should jump to during omplower pass. */
212 /* What to do with variables with implicitly determined sharing
214 enum omp_clause_default_kind default_kind
;
216 /* Nesting depth of this context. Used to beautify error messages re
217 invalid gotos. The outermost ctx is depth 1, with depth 0 being
218 reserved for the main body of the function. */
221 /* True if this parallel directive is nested within another. */
224 /* True if this construct can be cancelled. */
227 /* For OpenACC loops, a mask of gang, worker and vector used at
228 levels below this one. */
230 /* For OpenACC loops, a mask of gang, worker and vector used at
231 this level and above. For parallel and kernels clauses, a mask
232 indicating which of num_gangs/num_workers/num_vectors was used. */
236 /* A structure holding the elements of:
237 for (V = N1; V cond N2; V += STEP) [...] */
239 struct omp_for_data_loop
241 tree v
, n1
, n2
, step
;
242 enum tree_code cond_code
;
245 /* A structure describing the main elements of a parallel loop. */
249 struct omp_for_data_loop loop
;
254 bool have_nowait
, have_ordered
;
255 enum omp_clause_schedule_kind sched_kind
;
256 struct omp_for_data_loop
*loops
;
260 static splay_tree all_contexts
;
261 static int taskreg_nesting_level
;
262 static int target_nesting_level
;
263 static struct omp_region
*root_omp_region
;
264 static bitmap task_shared_vars
;
265 static vec
<omp_context
*> taskreg_contexts
;
267 static void scan_omp (gimple_seq
*, omp_context
*);
268 static tree
scan_omp_1_op (tree
*, int *, void *);
/* Shared case labels for statements whose sub-statements should be
   walked rather than handled directly.
   NOTE(review): the BIND/TRY/CATCH labels and the trailing break were lost
   in this copy; restored — confirm against upstream omp-low.c.  */
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
280 /* Helper function to get the name of the array containing the partial
281 reductions for OpenACC reductions. */
283 oacc_get_reduction_array_id (tree node
)
285 const char *id
= IDENTIFIER_POINTER (DECL_NAME (node
));
286 int len
= strlen ("OACC") + strlen (id
);
287 char *temp_name
= XALLOCAVEC (char, len
+ 1);
288 snprintf (temp_name
, len
+ 1, "OACC%s", id
);
289 return IDENTIFIER_POINTER (get_identifier (temp_name
));
292 /* Determine the number of threads OpenACC threads used to determine the
293 size of the array of partial reductions. Currently, this is num_gangs
294 * vector_length. This value may be different than GOACC_GET_NUM_THREADS,
295 because it is independent of the device used. */
298 oacc_max_threads (omp_context
*ctx
)
300 tree nthreads
, vector_length
, gangs
, clauses
;
302 gangs
= fold_convert (sizetype
, integer_one_node
);
303 vector_length
= gangs
;
305 /* The reduction clause may be nested inside a loop directive.
306 Scan for the innermost vector_length clause. */
307 for (omp_context
*oc
= ctx
; oc
; oc
= oc
->outer
)
309 if (gimple_code (oc
->stmt
) != GIMPLE_OMP_TARGET
310 || (gimple_omp_target_kind (oc
->stmt
)
311 != GF_OMP_TARGET_KIND_OACC_PARALLEL
))
314 clauses
= gimple_omp_target_clauses (oc
->stmt
);
316 vector_length
= find_omp_clause (clauses
, OMP_CLAUSE_VECTOR_LENGTH
);
318 vector_length
= fold_convert_loc (OMP_CLAUSE_LOCATION (vector_length
),
320 OMP_CLAUSE_VECTOR_LENGTH_EXPR
323 vector_length
= fold_convert (sizetype
, integer_one_node
);
325 gangs
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_GANGS
);
327 gangs
= fold_convert_loc (OMP_CLAUSE_LOCATION (gangs
), sizetype
,
328 OMP_CLAUSE_NUM_GANGS_EXPR (gangs
));
330 gangs
= fold_convert (sizetype
, integer_one_node
);
335 nthreads
= fold_build2 (MULT_EXPR
, sizetype
, gangs
, vector_length
);
340 /* Holds offload tables with decls. */
341 vec
<tree
, va_gc
> *offload_funcs
, *offload_vars
;
343 /* Convenience function for calling scan_omp_1_op on tree operands. */
346 scan_omp_op (tree
*tp
, omp_context
*ctx
)
348 struct walk_stmt_info wi
;
350 memset (&wi
, 0, sizeof (wi
));
352 wi
.want_locations
= true;
354 return walk_tree (tp
, scan_omp_1_op
, &wi
, NULL
);
357 static void lower_omp (gimple_seq
*, omp_context
*);
358 static tree
lookup_decl_in_outer_ctx (tree
, omp_context
*);
359 static tree
maybe_lookup_decl_in_outer_ctx (tree
, omp_context
*);
361 /* Find an OMP clause of type KIND within CLAUSES. */
364 find_omp_clause (tree clauses
, enum omp_clause_code kind
)
366 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
367 if (OMP_CLAUSE_CODE (clauses
) == kind
)
373 /* Return true if CTX is for an omp parallel. */
376 is_parallel_ctx (omp_context
*ctx
)
378 return gimple_code (ctx
->stmt
) == GIMPLE_OMP_PARALLEL
;
382 /* Return true if CTX is for an omp task. */
385 is_task_ctx (omp_context
*ctx
)
387 return gimple_code (ctx
->stmt
) == GIMPLE_OMP_TASK
;
391 /* Return true if CTX is for an omp parallel or omp task. */
394 is_taskreg_ctx (omp_context
*ctx
)
396 return gimple_code (ctx
->stmt
) == GIMPLE_OMP_PARALLEL
397 || gimple_code (ctx
->stmt
) == GIMPLE_OMP_TASK
;
401 /* Return true if REGION is a combined parallel+workshare region. */
404 is_combined_parallel (struct omp_region
*region
)
406 return region
->is_combined_parallel
;
410 /* Extract the header elements of parallel loop FOR_STMT and store
414 extract_omp_for_data (gomp_for
*for_stmt
, struct omp_for_data
*fd
,
415 struct omp_for_data_loop
*loops
)
417 tree t
, var
, *collapse_iter
, *collapse_count
;
418 tree count
= NULL_TREE
, iter_type
= long_integer_type_node
;
419 struct omp_for_data_loop
*loop
;
421 struct omp_for_data_loop dummy_loop
;
422 location_t loc
= gimple_location (for_stmt
);
423 bool simd
= gimple_omp_for_kind (for_stmt
) & GF_OMP_FOR_SIMD
;
424 bool distribute
= gimple_omp_for_kind (for_stmt
)
425 == GF_OMP_FOR_KIND_DISTRIBUTE
;
427 fd
->for_stmt
= for_stmt
;
429 fd
->collapse
= gimple_omp_for_collapse (for_stmt
);
430 if (fd
->collapse
> 1)
433 fd
->loops
= &fd
->loop
;
435 fd
->have_nowait
= distribute
|| simd
;
436 fd
->have_ordered
= false;
437 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
438 fd
->chunk_size
= NULL_TREE
;
439 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_CILKFOR
)
440 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_CILKFOR
;
441 collapse_iter
= NULL
;
442 collapse_count
= NULL
;
444 for (t
= gimple_omp_for_clauses (for_stmt
); t
; t
= OMP_CLAUSE_CHAIN (t
))
445 switch (OMP_CLAUSE_CODE (t
))
447 case OMP_CLAUSE_NOWAIT
:
448 fd
->have_nowait
= true;
450 case OMP_CLAUSE_ORDERED
:
451 fd
->have_ordered
= true;
453 case OMP_CLAUSE_SCHEDULE
:
454 gcc_assert (!distribute
);
455 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_KIND (t
);
456 fd
->chunk_size
= OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t
);
458 case OMP_CLAUSE_DIST_SCHEDULE
:
459 gcc_assert (distribute
);
460 fd
->chunk_size
= OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t
);
462 case OMP_CLAUSE_COLLAPSE
:
463 if (fd
->collapse
> 1)
465 collapse_iter
= &OMP_CLAUSE_COLLAPSE_ITERVAR (t
);
466 collapse_count
= &OMP_CLAUSE_COLLAPSE_COUNT (t
);
473 /* FIXME: for now map schedule(auto) to schedule(static).
474 There should be analysis to determine whether all iterations
475 are approximately the same amount of work (then schedule(static)
476 is best) or if it varies (then schedule(dynamic,N) is better). */
477 if (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_AUTO
)
479 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
480 gcc_assert (fd
->chunk_size
== NULL
);
482 gcc_assert (fd
->collapse
== 1 || collapse_iter
!= NULL
);
483 if (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
)
484 gcc_assert (fd
->chunk_size
== NULL
);
485 else if (fd
->chunk_size
== NULL
)
487 /* We only need to compute a default chunk size for ordered
488 static loops and dynamic loops. */
489 if (fd
->sched_kind
!= OMP_CLAUSE_SCHEDULE_STATIC
491 fd
->chunk_size
= (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
)
492 ? integer_zero_node
: integer_one_node
;
495 for (i
= 0; i
< fd
->collapse
; i
++)
497 if (fd
->collapse
== 1)
499 else if (loops
!= NULL
)
504 loop
->v
= gimple_omp_for_index (for_stmt
, i
);
505 gcc_assert (SSA_VAR_P (loop
->v
));
506 gcc_assert (TREE_CODE (TREE_TYPE (loop
->v
)) == INTEGER_TYPE
507 || TREE_CODE (TREE_TYPE (loop
->v
)) == POINTER_TYPE
);
508 var
= TREE_CODE (loop
->v
) == SSA_NAME
? SSA_NAME_VAR (loop
->v
) : loop
->v
;
509 loop
->n1
= gimple_omp_for_initial (for_stmt
, i
);
511 loop
->cond_code
= gimple_omp_for_cond (for_stmt
, i
);
512 loop
->n2
= gimple_omp_for_final (for_stmt
, i
);
513 switch (loop
->cond_code
)
519 gcc_assert (gimple_omp_for_kind (for_stmt
)
520 == GF_OMP_FOR_KIND_CILKSIMD
521 || (gimple_omp_for_kind (for_stmt
)
522 == GF_OMP_FOR_KIND_CILKFOR
));
525 if (POINTER_TYPE_P (TREE_TYPE (loop
->n2
)))
526 loop
->n2
= fold_build_pointer_plus_hwi_loc (loc
, loop
->n2
, 1);
528 loop
->n2
= fold_build2_loc (loc
,
529 PLUS_EXPR
, TREE_TYPE (loop
->n2
), loop
->n2
,
530 build_int_cst (TREE_TYPE (loop
->n2
), 1));
531 loop
->cond_code
= LT_EXPR
;
534 if (POINTER_TYPE_P (TREE_TYPE (loop
->n2
)))
535 loop
->n2
= fold_build_pointer_plus_hwi_loc (loc
, loop
->n2
, -1);
537 loop
->n2
= fold_build2_loc (loc
,
538 MINUS_EXPR
, TREE_TYPE (loop
->n2
), loop
->n2
,
539 build_int_cst (TREE_TYPE (loop
->n2
), 1));
540 loop
->cond_code
= GT_EXPR
;
546 t
= gimple_omp_for_incr (for_stmt
, i
);
547 gcc_assert (TREE_OPERAND (t
, 0) == var
);
548 switch (TREE_CODE (t
))
551 loop
->step
= TREE_OPERAND (t
, 1);
553 case POINTER_PLUS_EXPR
:
554 loop
->step
= fold_convert (ssizetype
, TREE_OPERAND (t
, 1));
557 loop
->step
= TREE_OPERAND (t
, 1);
558 loop
->step
= fold_build1_loc (loc
,
559 NEGATE_EXPR
, TREE_TYPE (loop
->step
),
567 || (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
568 && !fd
->have_ordered
))
570 if (fd
->collapse
== 1)
571 iter_type
= TREE_TYPE (loop
->v
);
573 || TYPE_PRECISION (iter_type
)
574 < TYPE_PRECISION (TREE_TYPE (loop
->v
)))
576 = build_nonstandard_integer_type
577 (TYPE_PRECISION (TREE_TYPE (loop
->v
)), 1);
579 else if (iter_type
!= long_long_unsigned_type_node
)
581 if (POINTER_TYPE_P (TREE_TYPE (loop
->v
)))
582 iter_type
= long_long_unsigned_type_node
;
583 else if (TYPE_UNSIGNED (TREE_TYPE (loop
->v
))
584 && TYPE_PRECISION (TREE_TYPE (loop
->v
))
585 >= TYPE_PRECISION (iter_type
))
589 if (loop
->cond_code
== LT_EXPR
)
590 n
= fold_build2_loc (loc
,
591 PLUS_EXPR
, TREE_TYPE (loop
->v
),
592 loop
->n2
, loop
->step
);
595 if (TREE_CODE (n
) != INTEGER_CST
596 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type
), n
))
597 iter_type
= long_long_unsigned_type_node
;
599 else if (TYPE_PRECISION (TREE_TYPE (loop
->v
))
600 > TYPE_PRECISION (iter_type
))
604 if (loop
->cond_code
== LT_EXPR
)
607 n2
= fold_build2_loc (loc
,
608 PLUS_EXPR
, TREE_TYPE (loop
->v
),
609 loop
->n2
, loop
->step
);
613 n1
= fold_build2_loc (loc
,
614 MINUS_EXPR
, TREE_TYPE (loop
->v
),
615 loop
->n2
, loop
->step
);
618 if (TREE_CODE (n1
) != INTEGER_CST
619 || TREE_CODE (n2
) != INTEGER_CST
620 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type
), n1
)
621 || !tree_int_cst_lt (n2
, TYPE_MAX_VALUE (iter_type
)))
622 iter_type
= long_long_unsigned_type_node
;
626 if (collapse_count
&& *collapse_count
== NULL
)
628 t
= fold_binary (loop
->cond_code
, boolean_type_node
,
629 fold_convert (TREE_TYPE (loop
->v
), loop
->n1
),
630 fold_convert (TREE_TYPE (loop
->v
), loop
->n2
));
631 if (t
&& integer_zerop (t
))
632 count
= build_zero_cst (long_long_unsigned_type_node
);
633 else if ((i
== 0 || count
!= NULL_TREE
)
634 && TREE_CODE (TREE_TYPE (loop
->v
)) == INTEGER_TYPE
635 && TREE_CONSTANT (loop
->n1
)
636 && TREE_CONSTANT (loop
->n2
)
637 && TREE_CODE (loop
->step
) == INTEGER_CST
)
639 tree itype
= TREE_TYPE (loop
->v
);
641 if (POINTER_TYPE_P (itype
))
642 itype
= signed_type_for (itype
);
643 t
= build_int_cst (itype
, (loop
->cond_code
== LT_EXPR
? -1 : 1));
644 t
= fold_build2_loc (loc
,
646 fold_convert_loc (loc
, itype
, loop
->step
), t
);
647 t
= fold_build2_loc (loc
, PLUS_EXPR
, itype
, t
,
648 fold_convert_loc (loc
, itype
, loop
->n2
));
649 t
= fold_build2_loc (loc
, MINUS_EXPR
, itype
, t
,
650 fold_convert_loc (loc
, itype
, loop
->n1
));
651 if (TYPE_UNSIGNED (itype
) && loop
->cond_code
== GT_EXPR
)
652 t
= fold_build2_loc (loc
, TRUNC_DIV_EXPR
, itype
,
653 fold_build1_loc (loc
, NEGATE_EXPR
, itype
, t
),
654 fold_build1_loc (loc
, NEGATE_EXPR
, itype
,
655 fold_convert_loc (loc
, itype
,
658 t
= fold_build2_loc (loc
, TRUNC_DIV_EXPR
, itype
, t
,
659 fold_convert_loc (loc
, itype
, loop
->step
));
660 t
= fold_convert_loc (loc
, long_long_unsigned_type_node
, t
);
661 if (count
!= NULL_TREE
)
662 count
= fold_build2_loc (loc
,
663 MULT_EXPR
, long_long_unsigned_type_node
,
667 if (TREE_CODE (count
) != INTEGER_CST
)
670 else if (count
&& !integer_zerop (count
))
677 && (fd
->sched_kind
!= OMP_CLAUSE_SCHEDULE_STATIC
678 || fd
->have_ordered
))
680 if (!tree_int_cst_lt (count
, TYPE_MAX_VALUE (long_integer_type_node
)))
681 iter_type
= long_long_unsigned_type_node
;
683 iter_type
= long_integer_type_node
;
685 else if (collapse_iter
&& *collapse_iter
!= NULL
)
686 iter_type
= TREE_TYPE (*collapse_iter
);
687 fd
->iter_type
= iter_type
;
688 if (collapse_iter
&& *collapse_iter
== NULL
)
689 *collapse_iter
= create_tmp_var (iter_type
, ".iter");
690 if (collapse_count
&& *collapse_count
== NULL
)
693 *collapse_count
= fold_convert_loc (loc
, iter_type
, count
);
695 *collapse_count
= create_tmp_var (iter_type
, ".count");
698 if (fd
->collapse
> 1)
700 fd
->loop
.v
= *collapse_iter
;
701 fd
->loop
.n1
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 0);
702 fd
->loop
.n2
= *collapse_count
;
703 fd
->loop
.step
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 1);
704 fd
->loop
.cond_code
= LT_EXPR
;
707 /* For OpenACC loops, force a chunk size of one, as this avoids the default
708 scheduling where several subsequent iterations are being executed by the
710 if (gimple_omp_for_kind (for_stmt
) == GF_OMP_FOR_KIND_OACC_LOOP
)
712 gcc_assert (fd
->chunk_size
== NULL_TREE
);
713 fd
->chunk_size
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 1);
718 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
719 is the immediate dominator of PAR_ENTRY_BB, return true if there
720 are no data dependencies that would prevent expanding the parallel
721 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
723 When expanding a combined parallel+workshare region, the call to
724 the child function may need additional arguments in the case of
725 GIMPLE_OMP_FOR regions. In some cases, these arguments are
726 computed out of variables passed in from the parent to the child
727 via 'struct .omp_data_s'. For instance:
729 #pragma omp parallel for schedule (guided, i * 4)
734 # BLOCK 2 (PAR_ENTRY_BB)
736 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
738 # BLOCK 3 (WS_ENTRY_BB)
739 .omp_data_i = &.omp_data_o;
740 D.1667 = .omp_data_i->i;
742 #pragma omp for schedule (guided, D.1598)
744 When we outline the parallel region, the call to the child function
745 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
746 that value is computed *after* the call site. So, in principle we
747 cannot do the transformation.
749 To see whether the code in WS_ENTRY_BB blocks the combined
750 parallel+workshare call, we collect all the variables used in the
751 GIMPLE_OMP_FOR header check whether they appear on the LHS of any
752 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
755 FIXME. If we had the SSA form built at this point, we could merely
756 hoist the code in block 3 into block 2 and be done with it. But at
757 this point we don't have dataflow information and though we could
758 hack something up here, it is really not worth the aggravation. */
761 workshare_safe_to_combine_p (basic_block ws_entry_bb
)
763 struct omp_for_data fd
;
764 gimple ws_stmt
= last_stmt (ws_entry_bb
);
766 if (gimple_code (ws_stmt
) == GIMPLE_OMP_SECTIONS
)
769 gcc_assert (gimple_code (ws_stmt
) == GIMPLE_OMP_FOR
);
771 extract_omp_for_data (as_a
<gomp_for
*> (ws_stmt
), &fd
, NULL
);
773 if (fd
.collapse
> 1 && TREE_CODE (fd
.loop
.n2
) != INTEGER_CST
)
775 if (fd
.iter_type
!= long_integer_type_node
)
778 /* FIXME. We give up too easily here. If any of these arguments
779 are not constants, they will likely involve variables that have
780 been mapped into fields of .omp_data_s for sharing with the child
781 function. With appropriate data flow, it would be possible to
783 if (!is_gimple_min_invariant (fd
.loop
.n1
)
784 || !is_gimple_min_invariant (fd
.loop
.n2
)
785 || !is_gimple_min_invariant (fd
.loop
.step
)
786 || (fd
.chunk_size
&& !is_gimple_min_invariant (fd
.chunk_size
)))
793 /* Collect additional arguments needed to emit a combined
794 parallel+workshare call. WS_STMT is the workshare directive being
797 static vec
<tree
, va_gc
> *
798 get_ws_args_for (gimple par_stmt
, gimple ws_stmt
)
801 location_t loc
= gimple_location (ws_stmt
);
802 vec
<tree
, va_gc
> *ws_args
;
804 if (gomp_for
*for_stmt
= dyn_cast
<gomp_for
*> (ws_stmt
))
806 struct omp_for_data fd
;
809 extract_omp_for_data (for_stmt
, &fd
, NULL
);
813 if (gimple_omp_for_combined_into_p (for_stmt
))
816 = find_omp_clause (gimple_omp_parallel_clauses (par_stmt
),
817 OMP_CLAUSE__LOOPTEMP_
);
819 n1
= OMP_CLAUSE_DECL (innerc
);
820 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
821 OMP_CLAUSE__LOOPTEMP_
);
823 n2
= OMP_CLAUSE_DECL (innerc
);
826 vec_alloc (ws_args
, 3 + (fd
.chunk_size
!= 0));
828 t
= fold_convert_loc (loc
, long_integer_type_node
, n1
);
829 ws_args
->quick_push (t
);
831 t
= fold_convert_loc (loc
, long_integer_type_node
, n2
);
832 ws_args
->quick_push (t
);
834 t
= fold_convert_loc (loc
, long_integer_type_node
, fd
.loop
.step
);
835 ws_args
->quick_push (t
);
839 t
= fold_convert_loc (loc
, long_integer_type_node
, fd
.chunk_size
);
840 ws_args
->quick_push (t
);
845 else if (gimple_code (ws_stmt
) == GIMPLE_OMP_SECTIONS
)
847 /* Number of sections is equal to the number of edges from the
848 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
849 the exit of the sections region. */
850 basic_block bb
= single_succ (gimple_bb (ws_stmt
));
851 t
= build_int_cst (unsigned_type_node
, EDGE_COUNT (bb
->succs
) - 1);
852 vec_alloc (ws_args
, 1);
853 ws_args
->quick_push (t
);
861 /* Discover whether REGION is a combined parallel+workshare region. */
864 determine_parallel_type (struct omp_region
*region
)
866 basic_block par_entry_bb
, par_exit_bb
;
867 basic_block ws_entry_bb
, ws_exit_bb
;
869 if (region
== NULL
|| region
->inner
== NULL
870 || region
->exit
== NULL
|| region
->inner
->exit
== NULL
871 || region
->inner
->cont
== NULL
)
874 /* We only support parallel+for and parallel+sections. */
875 if (region
->type
!= GIMPLE_OMP_PARALLEL
876 || (region
->inner
->type
!= GIMPLE_OMP_FOR
877 && region
->inner
->type
!= GIMPLE_OMP_SECTIONS
))
880 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
881 WS_EXIT_BB -> PAR_EXIT_BB. */
882 par_entry_bb
= region
->entry
;
883 par_exit_bb
= region
->exit
;
884 ws_entry_bb
= region
->inner
->entry
;
885 ws_exit_bb
= region
->inner
->exit
;
887 if (single_succ (par_entry_bb
) == ws_entry_bb
888 && single_succ (ws_exit_bb
) == par_exit_bb
889 && workshare_safe_to_combine_p (ws_entry_bb
)
890 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb
))
891 || (last_and_only_stmt (ws_entry_bb
)
892 && last_and_only_stmt (par_exit_bb
))))
894 gimple par_stmt
= last_stmt (par_entry_bb
);
895 gimple ws_stmt
= last_stmt (ws_entry_bb
);
897 if (region
->inner
->type
== GIMPLE_OMP_FOR
)
899 /* If this is a combined parallel loop, we need to determine
900 whether or not to use the combined library calls. There
901 are two cases where we do not apply the transformation:
902 static loops and any kind of ordered loop. In the first
903 case, we already open code the loop so there is no need
904 to do anything else. In the latter case, the combined
905 parallel loop call would still need extra synchronization
906 to implement ordered semantics, so there would not be any
907 gain in using the combined call. */
908 tree clauses
= gimple_omp_for_clauses (ws_stmt
);
909 tree c
= find_omp_clause (clauses
, OMP_CLAUSE_SCHEDULE
);
911 || OMP_CLAUSE_SCHEDULE_KIND (c
) == OMP_CLAUSE_SCHEDULE_STATIC
912 || find_omp_clause (clauses
, OMP_CLAUSE_ORDERED
))
914 region
->is_combined_parallel
= false;
915 region
->inner
->is_combined_parallel
= false;
920 region
->is_combined_parallel
= true;
921 region
->inner
->is_combined_parallel
= true;
922 region
->ws_args
= get_ws_args_for (par_stmt
, ws_stmt
);
927 /* Return true if EXPR is variable sized. */
930 is_variable_sized (const_tree expr
)
932 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
935 /* Return true if DECL is a reference type. */
938 is_reference (tree decl
)
940 return lang_hooks
.decls
.omp_privatize_by_reference (decl
);
943 /* Return the type of a decl. If the decl is reference type,
944 return its base type. */
946 get_base_type (tree decl
)
948 tree type
= TREE_TYPE (decl
);
949 if (is_reference (decl
))
950 type
= TREE_TYPE (type
);
954 /* Lookup variables. The "maybe" form
955 allows for the variable form to not have been entered, otherwise we
956 assert that the variable must have been entered. */
959 lookup_decl (tree var
, omp_context
*ctx
)
961 tree
*n
= ctx
->cb
.decl_map
->get (var
);
966 maybe_lookup_decl (const_tree var
, omp_context
*ctx
)
968 tree
*n
= ctx
->cb
.decl_map
->get (const_cast<tree
> (var
));
969 return n
? *n
: NULL_TREE
;
973 lookup_field (tree var
, omp_context
*ctx
)
976 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) var
);
977 return (tree
) n
->value
;
981 lookup_sfield (tree var
, omp_context
*ctx
)
984 n
= splay_tree_lookup (ctx
->sfield_map
985 ? ctx
->sfield_map
: ctx
->field_map
,
986 (splay_tree_key
) var
);
987 return (tree
) n
->value
;
991 maybe_lookup_field (tree var
, omp_context
*ctx
)
994 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) var
);
995 return n
? (tree
) n
->value
: NULL_TREE
;
999 lookup_oacc_reduction (const char *id
, omp_context
*ctx
)
1002 n
= splay_tree_lookup (ctx
->reduction_map
, (splay_tree_key
) id
);
1003 return (tree
) n
->value
;
1007 maybe_lookup_oacc_reduction (tree var
, omp_context
*ctx
)
1009 splay_tree_node n
= NULL
;
1010 if (ctx
->reduction_map
)
1011 n
= splay_tree_lookup (ctx
->reduction_map
, (splay_tree_key
) var
);
1012 return n
? (tree
) n
->value
: NULL_TREE
;
1015 /* Return true if DECL should be copied by pointer. SHARED_CTX is
1016 the parallel context if DECL is to be shared. */
1019 use_pointer_for_field (tree decl
, omp_context
*shared_ctx
)
1021 if (AGGREGATE_TYPE_P (TREE_TYPE (decl
)))
1024 /* We can only use copy-in/copy-out semantics for shared variables
1025 when we know the value is not accessible from an outer scope. */
1028 gcc_assert (!is_gimple_omp_oacc (shared_ctx
->stmt
));
1030 /* ??? Trivially accessible from anywhere. But why would we even
1031 be passing an address in this case? Should we simply assert
1032 this to be false, or should we have a cleanup pass that removes
1033 these from the list of mappings? */
1034 if (TREE_STATIC (decl
) || DECL_EXTERNAL (decl
))
1037 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
1038 without analyzing the expression whether or not its location
1039 is accessible to anyone else. In the case of nested parallel
1040 regions it certainly may be. */
1041 if (TREE_CODE (decl
) != RESULT_DECL
&& DECL_HAS_VALUE_EXPR_P (decl
))
1044 /* Do not use copy-in/copy-out for variables that have their
1046 if (TREE_ADDRESSABLE (decl
))
1049 /* lower_send_shared_vars only uses copy-in, but not copy-out
1051 if (TREE_READONLY (decl
)
1052 || ((TREE_CODE (decl
) == RESULT_DECL
1053 || TREE_CODE (decl
) == PARM_DECL
)
1054 && DECL_BY_REFERENCE (decl
)))
1057 /* Disallow copy-in/out in nested parallel if
1058 decl is shared in outer parallel, otherwise
1059 each thread could store the shared variable
1060 in its own copy-in location, making the
1061 variable no longer really shared. */
1062 if (shared_ctx
->is_nested
)
1066 for (up
= shared_ctx
->outer
; up
; up
= up
->outer
)
1067 if (is_taskreg_ctx (up
) && maybe_lookup_decl (decl
, up
))
1074 for (c
= gimple_omp_taskreg_clauses (up
->stmt
);
1075 c
; c
= OMP_CLAUSE_CHAIN (c
))
1076 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_SHARED
1077 && OMP_CLAUSE_DECL (c
) == decl
)
1081 goto maybe_mark_addressable_and_ret
;
1085 /* For tasks avoid using copy-in/out. As tasks can be
1086 deferred or executed in different thread, when GOMP_task
1087 returns, the task hasn't necessarily terminated. */
1088 if (is_task_ctx (shared_ctx
))
1091 maybe_mark_addressable_and_ret
:
1092 outer
= maybe_lookup_decl_in_outer_ctx (decl
, shared_ctx
);
1093 if (is_gimple_reg (outer
))
1095 /* Taking address of OUTER in lower_send_shared_vars
1096 might need regimplification of everything that uses the
1098 if (!task_shared_vars
)
1099 task_shared_vars
= BITMAP_ALLOC (NULL
);
1100 bitmap_set_bit (task_shared_vars
, DECL_UID (outer
));
1101 TREE_ADDRESSABLE (outer
) = 1;
1110 /* Construct a new automatic decl similar to VAR. */
1113 omp_copy_decl_2 (tree var
, tree name
, tree type
, omp_context
*ctx
)
1115 tree copy
= copy_var_decl (var
, name
, type
);
1117 DECL_CONTEXT (copy
) = current_function_decl
;
1118 DECL_CHAIN (copy
) = ctx
->block_vars
;
1119 ctx
->block_vars
= copy
;
1125 omp_copy_decl_1 (tree var
, omp_context
*ctx
)
1127 return omp_copy_decl_2 (var
, DECL_NAME (var
), TREE_TYPE (var
), ctx
);
1130 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
1133 omp_build_component_ref (tree obj
, tree field
)
1135 tree ret
= build3 (COMPONENT_REF
, TREE_TYPE (field
), obj
, field
, NULL
);
1136 if (TREE_THIS_VOLATILE (field
))
1137 TREE_THIS_VOLATILE (ret
) |= 1;
1138 if (TREE_READONLY (field
))
1139 TREE_READONLY (ret
) |= 1;
1143 /* Build tree nodes to access the field for VAR on the receiver side. */
1146 build_receiver_ref (tree var
, bool by_ref
, omp_context
*ctx
)
1148 tree x
, field
= lookup_field (var
, ctx
);
1150 /* If the receiver record type was remapped in the child function,
1151 remap the field into the new record type. */
1152 x
= maybe_lookup_field (field
, ctx
);
1156 x
= build_simple_mem_ref (ctx
->receiver_decl
);
1157 x
= omp_build_component_ref (x
, field
);
1159 x
= build_simple_mem_ref (x
);
1164 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
1165 of a parallel, this is a component reference; for workshare constructs
1166 this is some variable. */
1169 build_outer_var_ref (tree var
, omp_context
*ctx
)
1173 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var
, ctx
)))
1175 else if (is_variable_sized (var
))
1177 x
= TREE_OPERAND (DECL_VALUE_EXPR (var
), 0);
1178 x
= build_outer_var_ref (x
, ctx
);
1179 x
= build_simple_mem_ref (x
);
1181 else if (is_taskreg_ctx (ctx
))
1183 bool by_ref
= use_pointer_for_field (var
, NULL
);
1184 x
= build_receiver_ref (var
, by_ref
, ctx
);
1186 else if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
1187 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
1189 /* #pragma omp simd isn't a worksharing construct, and can reference even
1190 private vars in its linear etc. clauses. */
1192 if (ctx
->outer
&& is_taskreg_ctx (ctx
))
1193 x
= lookup_decl (var
, ctx
->outer
);
1194 else if (ctx
->outer
)
1195 x
= maybe_lookup_decl_in_outer_ctx (var
, ctx
);
1199 else if (ctx
->outer
)
1200 x
= lookup_decl (var
, ctx
->outer
);
1201 else if (is_reference (var
))
1202 /* This can happen with orphaned constructs. If var is reference, it is
1203 possible it is shared and as such valid. */
1208 if (is_reference (var
))
1209 x
= build_simple_mem_ref (x
);
1214 /* Build tree nodes to access the field for VAR on the sender side. */
1217 build_sender_ref (tree var
, omp_context
*ctx
)
1219 tree field
= lookup_sfield (var
, ctx
);
1220 return omp_build_component_ref (ctx
->sender_decl
, field
);
1223 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
1226 install_var_field (tree var
, bool by_ref
, int mask
, omp_context
*ctx
)
1228 tree field
, type
, sfield
= NULL_TREE
;
1230 gcc_assert ((mask
& 1) == 0
1231 || !splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) var
));
1232 gcc_assert ((mask
& 2) == 0 || !ctx
->sfield_map
1233 || !splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) var
));
1234 gcc_assert ((mask
& 3) == 3
1235 || !is_gimple_omp_oacc (ctx
->stmt
));
1237 type
= TREE_TYPE (var
);
1240 gcc_assert (TREE_CODE (type
) == ARRAY_TYPE
);
1241 type
= build_pointer_type (build_pointer_type (type
));
1244 type
= build_pointer_type (type
);
1245 else if ((mask
& 3) == 1 && is_reference (var
))
1246 type
= TREE_TYPE (type
);
1248 field
= build_decl (DECL_SOURCE_LOCATION (var
),
1249 FIELD_DECL
, DECL_NAME (var
), type
);
1251 /* Remember what variable this field was created for. This does have a
1252 side effect of making dwarf2out ignore this member, so for helpful
1253 debugging we clear it later in delete_omp_context. */
1254 DECL_ABSTRACT_ORIGIN (field
) = var
;
1255 if (type
== TREE_TYPE (var
))
1257 DECL_ALIGN (field
) = DECL_ALIGN (var
);
1258 DECL_USER_ALIGN (field
) = DECL_USER_ALIGN (var
);
1259 TREE_THIS_VOLATILE (field
) = TREE_THIS_VOLATILE (var
);
1262 DECL_ALIGN (field
) = TYPE_ALIGN (type
);
1264 if ((mask
& 3) == 3)
1266 insert_field_into_struct (ctx
->record_type
, field
);
1267 if (ctx
->srecord_type
)
1269 sfield
= build_decl (DECL_SOURCE_LOCATION (var
),
1270 FIELD_DECL
, DECL_NAME (var
), type
);
1271 DECL_ABSTRACT_ORIGIN (sfield
) = var
;
1272 DECL_ALIGN (sfield
) = DECL_ALIGN (field
);
1273 DECL_USER_ALIGN (sfield
) = DECL_USER_ALIGN (field
);
1274 TREE_THIS_VOLATILE (sfield
) = TREE_THIS_VOLATILE (field
);
1275 insert_field_into_struct (ctx
->srecord_type
, sfield
);
1280 if (ctx
->srecord_type
== NULL_TREE
)
1284 ctx
->srecord_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1285 ctx
->sfield_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1286 for (t
= TYPE_FIELDS (ctx
->record_type
); t
; t
= TREE_CHAIN (t
))
1288 sfield
= build_decl (DECL_SOURCE_LOCATION (var
),
1289 FIELD_DECL
, DECL_NAME (t
), TREE_TYPE (t
));
1290 DECL_ABSTRACT_ORIGIN (sfield
) = DECL_ABSTRACT_ORIGIN (t
);
1291 insert_field_into_struct (ctx
->srecord_type
, sfield
);
1292 splay_tree_insert (ctx
->sfield_map
,
1293 (splay_tree_key
) DECL_ABSTRACT_ORIGIN (t
),
1294 (splay_tree_value
) sfield
);
1298 insert_field_into_struct ((mask
& 1) ? ctx
->record_type
1299 : ctx
->srecord_type
, field
);
1303 splay_tree_insert (ctx
->field_map
, (splay_tree_key
) var
,
1304 (splay_tree_value
) field
);
1305 if ((mask
& 2) && ctx
->sfield_map
)
1306 splay_tree_insert (ctx
->sfield_map
, (splay_tree_key
) var
,
1307 (splay_tree_value
) sfield
);
1311 install_var_local (tree var
, omp_context
*ctx
)
1313 tree new_var
= omp_copy_decl_1 (var
, ctx
);
1314 insert_decl_map (&ctx
->cb
, var
, new_var
);
1318 /* Adjust the replacement for DECL in CTX for the new context. This means
1319 copying the DECL_VALUE_EXPR, and fixing up the type. */
1322 fixup_remapped_decl (tree decl
, omp_context
*ctx
, bool private_debug
)
1324 tree new_decl
, size
;
1326 new_decl
= lookup_decl (decl
, ctx
);
1328 TREE_TYPE (new_decl
) = remap_type (TREE_TYPE (decl
), &ctx
->cb
);
1330 if ((!TREE_CONSTANT (DECL_SIZE (new_decl
)) || private_debug
)
1331 && DECL_HAS_VALUE_EXPR_P (decl
))
1333 tree ve
= DECL_VALUE_EXPR (decl
);
1334 walk_tree (&ve
, copy_tree_body_r
, &ctx
->cb
, NULL
);
1335 SET_DECL_VALUE_EXPR (new_decl
, ve
);
1336 DECL_HAS_VALUE_EXPR_P (new_decl
) = 1;
1339 if (!TREE_CONSTANT (DECL_SIZE (new_decl
)))
1341 size
= remap_decl (DECL_SIZE (decl
), &ctx
->cb
);
1342 if (size
== error_mark_node
)
1343 size
= TYPE_SIZE (TREE_TYPE (new_decl
));
1344 DECL_SIZE (new_decl
) = size
;
1346 size
= remap_decl (DECL_SIZE_UNIT (decl
), &ctx
->cb
);
1347 if (size
== error_mark_node
)
1348 size
= TYPE_SIZE_UNIT (TREE_TYPE (new_decl
));
1349 DECL_SIZE_UNIT (new_decl
) = size
;
1353 /* The callback for remap_decl. Search all containing contexts for a
1354 mapping of the variable; this avoids having to duplicate the splay
1355 tree ahead of time. We know a mapping doesn't already exist in the
1356 given context. Create new mappings to implement default semantics. */
1359 omp_copy_decl (tree var
, copy_body_data
*cb
)
1361 omp_context
*ctx
= (omp_context
*) cb
;
1364 if (TREE_CODE (var
) == LABEL_DECL
)
1366 new_var
= create_artificial_label (DECL_SOURCE_LOCATION (var
));
1367 DECL_CONTEXT (new_var
) = current_function_decl
;
1368 insert_decl_map (&ctx
->cb
, var
, new_var
);
1372 while (!is_taskreg_ctx (ctx
))
1377 new_var
= maybe_lookup_decl (var
, ctx
);
1382 if (is_global_var (var
) || decl_function_context (var
) != ctx
->cb
.src_fn
)
1385 return error_mark_node
;
1389 /* Debugging dumps for parallel regions. */
1390 void dump_omp_region (FILE *, struct omp_region
*, int);
1391 void debug_omp_region (struct omp_region
*);
1392 void debug_all_omp_regions (void);
1394 /* Dump the parallel region tree rooted at REGION. */
1397 dump_omp_region (FILE *file
, struct omp_region
*region
, int indent
)
1399 fprintf (file
, "%*sbb %d: %s\n", indent
, "", region
->entry
->index
,
1400 gimple_code_name
[region
->type
]);
1403 dump_omp_region (file
, region
->inner
, indent
+ 4);
1407 fprintf (file
, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent
, "",
1408 region
->cont
->index
);
1412 fprintf (file
, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent
, "",
1413 region
->exit
->index
);
1415 fprintf (file
, "%*s[no exit marker]\n", indent
, "");
1418 dump_omp_region (file
, region
->next
, indent
);
1422 debug_omp_region (struct omp_region
*region
)
1424 dump_omp_region (stderr
, region
, 0);
1428 debug_all_omp_regions (void)
1430 dump_omp_region (stderr
, root_omp_region
, 0);
1434 /* Create a new parallel region starting at STMT inside region PARENT. */
1436 static struct omp_region
*
1437 new_omp_region (basic_block bb
, enum gimple_code type
,
1438 struct omp_region
*parent
)
1440 struct omp_region
*region
= XCNEW (struct omp_region
);
1442 region
->outer
= parent
;
1444 region
->type
= type
;
1448 /* This is a nested region. Add it to the list of inner
1449 regions in PARENT. */
1450 region
->next
= parent
->inner
;
1451 parent
->inner
= region
;
1455 /* This is a toplevel region. Add it to the list of toplevel
1456 regions in ROOT_OMP_REGION. */
1457 region
->next
= root_omp_region
;
1458 root_omp_region
= region
;
1464 /* Release the memory associated with the region tree rooted at REGION. */
1467 free_omp_region_1 (struct omp_region
*region
)
1469 struct omp_region
*i
, *n
;
1471 for (i
= region
->inner
; i
; i
= n
)
1474 free_omp_region_1 (i
);
1480 /* Release the memory for the entire omp region tree. */
1483 free_omp_regions (void)
1485 struct omp_region
*r
, *n
;
1486 for (r
= root_omp_region
; r
; r
= n
)
1489 free_omp_region_1 (r
);
1491 root_omp_region
= NULL
;
1495 /* Create a new context, with OUTER_CTX being the surrounding context. */
1497 static omp_context
*
1498 new_omp_context (gimple stmt
, omp_context
*outer_ctx
)
1500 omp_context
*ctx
= XCNEW (omp_context
);
1502 splay_tree_insert (all_contexts
, (splay_tree_key
) stmt
,
1503 (splay_tree_value
) ctx
);
1508 ctx
->outer
= outer_ctx
;
1509 ctx
->cb
= outer_ctx
->cb
;
1510 ctx
->cb
.block
= NULL
;
1511 ctx
->depth
= outer_ctx
->depth
+ 1;
1512 ctx
->reduction_map
= outer_ctx
->reduction_map
;
1516 ctx
->cb
.src_fn
= current_function_decl
;
1517 ctx
->cb
.dst_fn
= current_function_decl
;
1518 ctx
->cb
.src_node
= cgraph_node::get (current_function_decl
);
1519 gcc_checking_assert (ctx
->cb
.src_node
);
1520 ctx
->cb
.dst_node
= ctx
->cb
.src_node
;
1521 ctx
->cb
.src_cfun
= cfun
;
1522 ctx
->cb
.copy_decl
= omp_copy_decl
;
1523 ctx
->cb
.eh_lp_nr
= 0;
1524 ctx
->cb
.transform_call_graph_edges
= CB_CGE_MOVE
;
1528 ctx
->cb
.decl_map
= new hash_map
<tree
, tree
>;
1533 static gimple_seq
maybe_catch_exception (gimple_seq
);
1535 /* Finalize task copyfn. */
1538 finalize_task_copyfn (gomp_task
*task_stmt
)
1540 struct function
*child_cfun
;
1542 gimple_seq seq
= NULL
, new_seq
;
1545 child_fn
= gimple_omp_task_copy_fn (task_stmt
);
1546 if (child_fn
== NULL_TREE
)
1549 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
1550 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
= cfun
->curr_properties
;
1552 push_cfun (child_cfun
);
1553 bind
= gimplify_body (child_fn
, false);
1554 gimple_seq_add_stmt (&seq
, bind
);
1555 new_seq
= maybe_catch_exception (seq
);
1558 bind
= gimple_build_bind (NULL
, new_seq
, NULL
);
1560 gimple_seq_add_stmt (&seq
, bind
);
1562 gimple_set_body (child_fn
, seq
);
1565 /* Inform the callgraph about the new function. */
1566 cgraph_node::add_new_function (child_fn
, false);
1567 cgraph_node::get (child_fn
)->parallelized_function
= 1;
1570 /* Destroy a omp_context data structures. Called through the splay tree
1571 value delete callback. */
1574 delete_omp_context (splay_tree_value value
)
1576 omp_context
*ctx
= (omp_context
*) value
;
1578 delete ctx
->cb
.decl_map
;
1581 splay_tree_delete (ctx
->field_map
);
1582 if (ctx
->sfield_map
)
1583 splay_tree_delete (ctx
->sfield_map
);
1584 /* Reduction map is copied to nested contexts, so only delete it in the
1586 if (ctx
->reduction_map
1587 && gimple_code (ctx
->stmt
) == GIMPLE_OMP_TARGET
1588 && is_gimple_omp_offloaded (ctx
->stmt
)
1589 && is_gimple_omp_oacc (ctx
->stmt
))
1590 splay_tree_delete (ctx
->reduction_map
);
1592 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1593 it produces corrupt debug information. */
1594 if (ctx
->record_type
)
1597 for (t
= TYPE_FIELDS (ctx
->record_type
); t
; t
= DECL_CHAIN (t
))
1598 DECL_ABSTRACT_ORIGIN (t
) = NULL
;
1600 if (ctx
->srecord_type
)
1603 for (t
= TYPE_FIELDS (ctx
->srecord_type
); t
; t
= DECL_CHAIN (t
))
1604 DECL_ABSTRACT_ORIGIN (t
) = NULL
;
1607 if (is_task_ctx (ctx
))
1608 finalize_task_copyfn (as_a
<gomp_task
*> (ctx
->stmt
));
1613 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1617 fixup_child_record_type (omp_context
*ctx
)
1619 tree f
, type
= ctx
->record_type
;
1621 /* ??? It isn't sufficient to just call remap_type here, because
1622 variably_modified_type_p doesn't work the way we expect for
1623 record types. Testing each field for whether it needs remapping
1624 and creating a new record by hand works, however. */
1625 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
1626 if (variably_modified_type_p (TREE_TYPE (f
), ctx
->cb
.src_fn
))
1630 tree name
, new_fields
= NULL
;
1632 type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1633 name
= DECL_NAME (TYPE_NAME (ctx
->record_type
));
1634 name
= build_decl (DECL_SOURCE_LOCATION (ctx
->receiver_decl
),
1635 TYPE_DECL
, name
, type
);
1636 TYPE_NAME (type
) = name
;
1638 for (f
= TYPE_FIELDS (ctx
->record_type
); f
; f
= DECL_CHAIN (f
))
1640 tree new_f
= copy_node (f
);
1641 DECL_CONTEXT (new_f
) = type
;
1642 TREE_TYPE (new_f
) = remap_type (TREE_TYPE (f
), &ctx
->cb
);
1643 DECL_CHAIN (new_f
) = new_fields
;
1644 walk_tree (&DECL_SIZE (new_f
), copy_tree_body_r
, &ctx
->cb
, NULL
);
1645 walk_tree (&DECL_SIZE_UNIT (new_f
), copy_tree_body_r
,
1647 walk_tree (&DECL_FIELD_OFFSET (new_f
), copy_tree_body_r
,
1651 /* Arrange to be able to look up the receiver field
1652 given the sender field. */
1653 splay_tree_insert (ctx
->field_map
, (splay_tree_key
) f
,
1654 (splay_tree_value
) new_f
);
1656 TYPE_FIELDS (type
) = nreverse (new_fields
);
1660 TREE_TYPE (ctx
->receiver_decl
)
1661 = build_qualified_type (build_reference_type (type
), TYPE_QUAL_RESTRICT
);
1664 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1665 specified by CLAUSES. */
1668 scan_sharing_clauses (tree clauses
, omp_context
*ctx
)
1671 bool scan_array_reductions
= false;
1673 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1677 switch (OMP_CLAUSE_CODE (c
))
1679 case OMP_CLAUSE_PRIVATE
:
1680 decl
= OMP_CLAUSE_DECL (c
);
1681 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
1683 else if (!is_variable_sized (decl
))
1684 install_var_local (decl
, ctx
);
1687 case OMP_CLAUSE_SHARED
:
1688 decl
= OMP_CLAUSE_DECL (c
);
1689 /* Ignore shared directives in teams construct. */
1690 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
1692 /* Global variables don't need to be copied,
1693 the receiver side will use them directly. */
1694 tree odecl
= maybe_lookup_decl_in_outer_ctx (decl
, ctx
);
1695 if (is_global_var (odecl
))
1697 insert_decl_map (&ctx
->cb
, decl
, odecl
);
1700 gcc_assert (is_taskreg_ctx (ctx
));
1701 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl
))
1702 || !is_variable_sized (decl
));
1703 /* Global variables don't need to be copied,
1704 the receiver side will use them directly. */
1705 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
)))
1707 by_ref
= use_pointer_for_field (decl
, ctx
);
1708 if (! TREE_READONLY (decl
)
1709 || TREE_ADDRESSABLE (decl
)
1711 || is_reference (decl
))
1713 install_var_field (decl
, by_ref
, 3, ctx
);
1714 install_var_local (decl
, ctx
);
1717 /* We don't need to copy const scalar vars back. */
1718 OMP_CLAUSE_SET_CODE (c
, OMP_CLAUSE_FIRSTPRIVATE
);
1721 case OMP_CLAUSE_LASTPRIVATE
:
1722 /* Let the corresponding firstprivate clause create
1724 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
1728 case OMP_CLAUSE_FIRSTPRIVATE
:
1729 if (is_gimple_omp_oacc (ctx
->stmt
))
1731 sorry ("clause not supported yet");
1735 case OMP_CLAUSE_REDUCTION
:
1736 case OMP_CLAUSE_LINEAR
:
1737 decl
= OMP_CLAUSE_DECL (c
);
1739 if (is_variable_sized (decl
))
1741 if (is_task_ctx (ctx
))
1742 install_var_field (decl
, false, 1, ctx
);
1745 else if (is_taskreg_ctx (ctx
))
1748 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
));
1749 by_ref
= use_pointer_for_field (decl
, NULL
);
1751 if (is_task_ctx (ctx
)
1752 && (global
|| by_ref
|| is_reference (decl
)))
1754 install_var_field (decl
, false, 1, ctx
);
1756 install_var_field (decl
, by_ref
, 2, ctx
);
1759 install_var_field (decl
, by_ref
, 3, ctx
);
1761 install_var_local (decl
, ctx
);
1762 if (is_gimple_omp_oacc (ctx
->stmt
)
1763 && OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
1765 /* Create a decl for the reduction array. */
1766 tree var
= OMP_CLAUSE_DECL (c
);
1767 tree type
= get_base_type (var
);
1768 tree ptype
= build_pointer_type (type
);
1769 tree array
= create_tmp_var (ptype
,
1770 oacc_get_reduction_array_id (var
));
1771 omp_context
*c
= (ctx
->field_map
? ctx
: ctx
->outer
);
1772 install_var_field (array
, true, 3, c
);
1773 install_var_local (array
, c
);
1775 /* Insert it into the current context. */
1776 splay_tree_insert (ctx
->reduction_map
, (splay_tree_key
)
1777 oacc_get_reduction_array_id (var
),
1778 (splay_tree_value
) array
);
1779 splay_tree_insert (ctx
->reduction_map
,
1780 (splay_tree_key
) array
,
1781 (splay_tree_value
) array
);
1785 case OMP_CLAUSE__LOOPTEMP_
:
1786 gcc_assert (is_parallel_ctx (ctx
));
1787 decl
= OMP_CLAUSE_DECL (c
);
1788 install_var_field (decl
, false, 3, ctx
);
1789 install_var_local (decl
, ctx
);
1792 case OMP_CLAUSE_COPYPRIVATE
:
1793 case OMP_CLAUSE_COPYIN
:
1794 decl
= OMP_CLAUSE_DECL (c
);
1795 by_ref
= use_pointer_for_field (decl
, NULL
);
1796 install_var_field (decl
, by_ref
, 3, ctx
);
1799 case OMP_CLAUSE_DEFAULT
:
1800 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_KIND (c
);
1803 case OMP_CLAUSE_FINAL
:
1805 case OMP_CLAUSE_NUM_THREADS
:
1806 case OMP_CLAUSE_NUM_TEAMS
:
1807 case OMP_CLAUSE_THREAD_LIMIT
:
1808 case OMP_CLAUSE_DEVICE
:
1809 case OMP_CLAUSE_SCHEDULE
:
1810 case OMP_CLAUSE_DIST_SCHEDULE
:
1811 case OMP_CLAUSE_DEPEND
:
1812 case OMP_CLAUSE__CILK_FOR_COUNT_
:
1813 case OMP_CLAUSE_NUM_GANGS
:
1814 case OMP_CLAUSE_NUM_WORKERS
:
1815 case OMP_CLAUSE_VECTOR_LENGTH
:
1817 scan_omp_op (&OMP_CLAUSE_OPERAND (c
, 0), ctx
->outer
);
1821 case OMP_CLAUSE_FROM
:
1822 case OMP_CLAUSE_MAP
:
1824 scan_omp_op (&OMP_CLAUSE_SIZE (c
), ctx
->outer
);
1825 decl
= OMP_CLAUSE_DECL (c
);
1826 /* Global variables with "omp declare target" attribute
1827 don't need to be copied, the receiver side will use them
1829 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
1831 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
))
1832 && varpool_node::get_create (decl
)->offloadable
)
1834 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
1835 && OMP_CLAUSE_MAP_KIND (c
) == GOMP_MAP_POINTER
)
1837 /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
1838 not offloaded; there is nothing to map for those. */
1839 if (!is_gimple_omp_offloaded (ctx
->stmt
)
1840 && !POINTER_TYPE_P (TREE_TYPE (decl
))
1841 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
))
1846 if (DECL_SIZE (decl
)
1847 && TREE_CODE (DECL_SIZE (decl
)) != INTEGER_CST
)
1849 tree decl2
= DECL_VALUE_EXPR (decl
);
1850 gcc_assert (TREE_CODE (decl2
) == INDIRECT_REF
);
1851 decl2
= TREE_OPERAND (decl2
, 0);
1852 gcc_assert (DECL_P (decl2
));
1853 install_var_field (decl2
, true, 3, ctx
);
1854 install_var_local (decl2
, ctx
);
1855 install_var_local (decl
, ctx
);
1859 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
1860 && OMP_CLAUSE_MAP_KIND (c
) == GOMP_MAP_POINTER
1861 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
)
1862 && TREE_CODE (TREE_TYPE (decl
)) == ARRAY_TYPE
)
1863 install_var_field (decl
, true, 7, ctx
);
1865 install_var_field (decl
, true, 3, ctx
);
1866 if (is_gimple_omp_offloaded (ctx
->stmt
))
1867 install_var_local (decl
, ctx
);
1872 tree base
= get_base_address (decl
);
1873 tree nc
= OMP_CLAUSE_CHAIN (c
);
1876 && OMP_CLAUSE_CODE (nc
) == OMP_CLAUSE_MAP
1877 && OMP_CLAUSE_DECL (nc
) == base
1878 && OMP_CLAUSE_MAP_KIND (nc
) == GOMP_MAP_POINTER
1879 && integer_zerop (OMP_CLAUSE_SIZE (nc
)))
1881 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
) = 1;
1882 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc
) = 1;
1888 scan_omp_op (&OMP_CLAUSE_DECL (c
), ctx
->outer
);
1889 decl
= OMP_CLAUSE_DECL (c
);
1891 gcc_assert (!splay_tree_lookup (ctx
->field_map
,
1892 (splay_tree_key
) decl
));
1894 = build_decl (OMP_CLAUSE_LOCATION (c
),
1895 FIELD_DECL
, NULL_TREE
, ptr_type_node
);
1896 DECL_ALIGN (field
) = TYPE_ALIGN (ptr_type_node
);
1897 insert_field_into_struct (ctx
->record_type
, field
);
1898 splay_tree_insert (ctx
->field_map
, (splay_tree_key
) decl
,
1899 (splay_tree_value
) field
);
1904 case OMP_CLAUSE_NOWAIT
:
1905 case OMP_CLAUSE_ORDERED
:
1906 case OMP_CLAUSE_COLLAPSE
:
1907 case OMP_CLAUSE_UNTIED
:
1908 case OMP_CLAUSE_MERGEABLE
:
1909 case OMP_CLAUSE_PROC_BIND
:
1910 case OMP_CLAUSE_SAFELEN
:
1911 case OMP_CLAUSE_ASYNC
:
1912 case OMP_CLAUSE_WAIT
:
1913 case OMP_CLAUSE_GANG
:
1914 case OMP_CLAUSE_WORKER
:
1915 case OMP_CLAUSE_VECTOR
:
1918 case OMP_CLAUSE_ALIGNED
:
1919 decl
= OMP_CLAUSE_DECL (c
);
1920 if (is_global_var (decl
)
1921 && TREE_CODE (TREE_TYPE (decl
)) == ARRAY_TYPE
)
1922 install_var_local (decl
, ctx
);
1925 case OMP_CLAUSE_DEVICE_RESIDENT
:
1926 case OMP_CLAUSE_USE_DEVICE
:
1927 case OMP_CLAUSE__CACHE_
:
1928 case OMP_CLAUSE_INDEPENDENT
:
1929 case OMP_CLAUSE_AUTO
:
1930 case OMP_CLAUSE_SEQ
:
1931 sorry ("Clause not supported yet");
1939 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1941 switch (OMP_CLAUSE_CODE (c
))
1943 case OMP_CLAUSE_LASTPRIVATE
:
1944 /* Let the corresponding firstprivate clause create
1946 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
1947 scan_array_reductions
= true;
1948 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
1952 case OMP_CLAUSE_FIRSTPRIVATE
:
1953 if (is_gimple_omp_oacc (ctx
->stmt
))
1955 sorry ("clause not supported yet");
1959 case OMP_CLAUSE_PRIVATE
:
1960 case OMP_CLAUSE_REDUCTION
:
1961 case OMP_CLAUSE_LINEAR
:
1962 decl
= OMP_CLAUSE_DECL (c
);
1963 if (is_variable_sized (decl
))
1964 install_var_local (decl
, ctx
);
1965 fixup_remapped_decl (decl
, ctx
,
1966 OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_PRIVATE
1967 && OMP_CLAUSE_PRIVATE_DEBUG (c
));
1968 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
1969 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
1970 scan_array_reductions
= true;
1971 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
1972 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
))
1973 scan_array_reductions
= true;
1976 case OMP_CLAUSE_SHARED
:
1977 /* Ignore shared directives in teams construct. */
1978 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
1980 decl
= OMP_CLAUSE_DECL (c
);
1981 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
)))
1982 fixup_remapped_decl (decl
, ctx
, false);
1985 case OMP_CLAUSE_MAP
:
1986 if (!is_gimple_omp_offloaded (ctx
->stmt
))
1988 decl
= OMP_CLAUSE_DECL (c
);
1990 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
))
1991 && varpool_node::get_create (decl
)->offloadable
)
1995 if (OMP_CLAUSE_MAP_KIND (c
) == GOMP_MAP_POINTER
1996 && TREE_CODE (TREE_TYPE (decl
)) == ARRAY_TYPE
1997 && !COMPLETE_TYPE_P (TREE_TYPE (decl
)))
1999 tree new_decl
= lookup_decl (decl
, ctx
);
2000 TREE_TYPE (new_decl
)
2001 = remap_type (TREE_TYPE (decl
), &ctx
->cb
);
2003 else if (DECL_SIZE (decl
)
2004 && TREE_CODE (DECL_SIZE (decl
)) != INTEGER_CST
)
2006 tree decl2
= DECL_VALUE_EXPR (decl
);
2007 gcc_assert (TREE_CODE (decl2
) == INDIRECT_REF
);
2008 decl2
= TREE_OPERAND (decl2
, 0);
2009 gcc_assert (DECL_P (decl2
));
2010 fixup_remapped_decl (decl2
, ctx
, false);
2011 fixup_remapped_decl (decl
, ctx
, true);
2014 fixup_remapped_decl (decl
, ctx
, false);
2018 case OMP_CLAUSE_COPYPRIVATE
:
2019 case OMP_CLAUSE_COPYIN
:
2020 case OMP_CLAUSE_DEFAULT
:
2022 case OMP_CLAUSE_NUM_THREADS
:
2023 case OMP_CLAUSE_NUM_TEAMS
:
2024 case OMP_CLAUSE_THREAD_LIMIT
:
2025 case OMP_CLAUSE_DEVICE
:
2026 case OMP_CLAUSE_SCHEDULE
:
2027 case OMP_CLAUSE_DIST_SCHEDULE
:
2028 case OMP_CLAUSE_NOWAIT
:
2029 case OMP_CLAUSE_ORDERED
:
2030 case OMP_CLAUSE_COLLAPSE
:
2031 case OMP_CLAUSE_UNTIED
:
2032 case OMP_CLAUSE_FINAL
:
2033 case OMP_CLAUSE_MERGEABLE
:
2034 case OMP_CLAUSE_PROC_BIND
:
2035 case OMP_CLAUSE_SAFELEN
:
2036 case OMP_CLAUSE_ALIGNED
:
2037 case OMP_CLAUSE_DEPEND
:
2038 case OMP_CLAUSE__LOOPTEMP_
:
2040 case OMP_CLAUSE_FROM
:
2041 case OMP_CLAUSE__CILK_FOR_COUNT_
:
2042 case OMP_CLAUSE_ASYNC
:
2043 case OMP_CLAUSE_WAIT
:
2044 case OMP_CLAUSE_NUM_GANGS
:
2045 case OMP_CLAUSE_NUM_WORKERS
:
2046 case OMP_CLAUSE_VECTOR_LENGTH
:
2047 case OMP_CLAUSE_GANG
:
2048 case OMP_CLAUSE_WORKER
:
2049 case OMP_CLAUSE_VECTOR
:
2052 case OMP_CLAUSE_DEVICE_RESIDENT
:
2053 case OMP_CLAUSE_USE_DEVICE
:
2054 case OMP_CLAUSE__CACHE_
:
2055 case OMP_CLAUSE_INDEPENDENT
:
2056 case OMP_CLAUSE_AUTO
:
2057 case OMP_CLAUSE_SEQ
:
2058 sorry ("Clause not supported yet");
2066 gcc_checking_assert (!scan_array_reductions
2067 || !is_gimple_omp_oacc (ctx
->stmt
));
2068 if (scan_array_reductions
)
2069 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2070 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
2071 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2073 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
), ctx
);
2074 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
2076 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
2077 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
2078 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
2079 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
2080 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
))
2081 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
), ctx
);
2084 /* Create a new name for omp child function. Returns an identifier. If
2085 IS_CILK_FOR is true then the suffix for the child function is
2089 create_omp_child_function_name (bool task_copy
, bool is_cilk_for
)
2092 return clone_function_name (current_function_decl
, "_cilk_for_fn");
2093 return clone_function_name (current_function_decl
,
2094 task_copy
? "_omp_cpyfn" : "_omp_fn");
2097 /* Returns the type of the induction variable for the child function for
2098 _Cilk_for and the types for _high and _low variables based on TYPE. */
2101 cilk_for_check_loop_diff_type (tree type
)
2103 if (TYPE_PRECISION (type
) <= TYPE_PRECISION (uint32_type_node
))
2105 if (TYPE_UNSIGNED (type
))
2106 return uint32_type_node
;
2108 return integer_type_node
;
2112 if (TYPE_UNSIGNED (type
))
2113 return uint64_type_node
;
2115 return long_long_integer_type_node
;
2119 /* Build a decl for the omp child function. It'll not contain a body
2120 yet, just the bare decl. */
2123 create_omp_child_function (omp_context
*ctx
, bool task_copy
)
2125 tree decl
, type
, name
, t
;
2128 = (flag_cilkplus
&& gimple_code (ctx
->stmt
) == GIMPLE_OMP_PARALLEL
)
2129 ? find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
2130 OMP_CLAUSE__CILK_FOR_COUNT_
) : NULL_TREE
;
2131 tree cilk_var_type
= NULL_TREE
;
2133 name
= create_omp_child_function_name (task_copy
,
2134 cilk_for_count
!= NULL_TREE
);
2136 type
= build_function_type_list (void_type_node
, ptr_type_node
,
2137 ptr_type_node
, NULL_TREE
);
2138 else if (cilk_for_count
)
2140 type
= TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count
, 0));
2141 cilk_var_type
= cilk_for_check_loop_diff_type (type
);
2142 type
= build_function_type_list (void_type_node
, ptr_type_node
,
2143 cilk_var_type
, cilk_var_type
, NULL_TREE
);
2146 type
= build_function_type_list (void_type_node
, ptr_type_node
, NULL_TREE
);
2148 decl
= build_decl (gimple_location (ctx
->stmt
), FUNCTION_DECL
, name
, type
);
2150 gcc_checking_assert (!is_gimple_omp_oacc (ctx
->stmt
)
2153 ctx
->cb
.dst_fn
= decl
;
2155 gimple_omp_task_set_copy_fn (ctx
->stmt
, decl
);
2157 TREE_STATIC (decl
) = 1;
2158 TREE_USED (decl
) = 1;
2159 DECL_ARTIFICIAL (decl
) = 1;
2160 DECL_IGNORED_P (decl
) = 0;
2161 TREE_PUBLIC (decl
) = 0;
2162 DECL_UNINLINABLE (decl
) = 1;
2163 DECL_EXTERNAL (decl
) = 0;
2164 DECL_CONTEXT (decl
) = NULL_TREE
;
2165 DECL_INITIAL (decl
) = make_node (BLOCK
);
2166 if (cgraph_node::get (current_function_decl
)->offloadable
)
2167 cgraph_node::get_create (decl
)->offloadable
= 1;
2171 for (octx
= ctx
; octx
; octx
= octx
->outer
)
2172 if (is_gimple_omp_offloaded (octx
->stmt
))
2174 cgraph_node::get_create (decl
)->offloadable
= 1;
2175 #ifdef ENABLE_OFFLOADING
2176 g
->have_offload
= true;
2182 if (cgraph_node::get_create (decl
)->offloadable
2183 && !lookup_attribute ("omp declare target",
2184 DECL_ATTRIBUTES (current_function_decl
)))
2185 DECL_ATTRIBUTES (decl
)
2186 = tree_cons (get_identifier ("omp target entrypoint"),
2187 NULL_TREE
, DECL_ATTRIBUTES (decl
));
2189 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
2190 RESULT_DECL
, NULL_TREE
, void_type_node
);
2191 DECL_ARTIFICIAL (t
) = 1;
2192 DECL_IGNORED_P (t
) = 1;
2193 DECL_CONTEXT (t
) = decl
;
2194 DECL_RESULT (decl
) = t
;
2196 /* _Cilk_for's child function requires two extra parameters called
2197 __low and __high that are set the by Cilk runtime when it calls this
2201 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
2202 PARM_DECL
, get_identifier ("__high"), cilk_var_type
);
2203 DECL_ARTIFICIAL (t
) = 1;
2204 DECL_NAMELESS (t
) = 1;
2205 DECL_ARG_TYPE (t
) = ptr_type_node
;
2206 DECL_CONTEXT (t
) = current_function_decl
;
2208 DECL_CHAIN (t
) = DECL_ARGUMENTS (decl
);
2209 DECL_ARGUMENTS (decl
) = t
;
2211 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
2212 PARM_DECL
, get_identifier ("__low"), cilk_var_type
);
2213 DECL_ARTIFICIAL (t
) = 1;
2214 DECL_NAMELESS (t
) = 1;
2215 DECL_ARG_TYPE (t
) = ptr_type_node
;
2216 DECL_CONTEXT (t
) = current_function_decl
;
2218 DECL_CHAIN (t
) = DECL_ARGUMENTS (decl
);
2219 DECL_ARGUMENTS (decl
) = t
;
2222 tree data_name
= get_identifier (".omp_data_i");
2223 t
= build_decl (DECL_SOURCE_LOCATION (decl
), PARM_DECL
, data_name
,
2225 DECL_ARTIFICIAL (t
) = 1;
2226 DECL_NAMELESS (t
) = 1;
2227 DECL_ARG_TYPE (t
) = ptr_type_node
;
2228 DECL_CONTEXT (t
) = current_function_decl
;
2231 DECL_CHAIN (t
) = DECL_ARGUMENTS (decl
);
2232 DECL_ARGUMENTS (decl
) = t
;
2234 ctx
->receiver_decl
= t
;
2237 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
2238 PARM_DECL
, get_identifier (".omp_data_o"),
2240 DECL_ARTIFICIAL (t
) = 1;
2241 DECL_NAMELESS (t
) = 1;
2242 DECL_ARG_TYPE (t
) = ptr_type_node
;
2243 DECL_CONTEXT (t
) = current_function_decl
;
2245 TREE_ADDRESSABLE (t
) = 1;
2246 DECL_CHAIN (t
) = DECL_ARGUMENTS (decl
);
2247 DECL_ARGUMENTS (decl
) = t
;
2250 /* Allocate memory for the function structure. The call to
2251 allocate_struct_function clobbers CFUN, so we need to restore
2253 push_struct_function (decl
);
2254 cfun
->function_end_locus
= gimple_location (ctx
->stmt
);
2258 /* Callback for walk_gimple_seq. Check if combined parallel
2259 contains gimple_omp_for_combined_into_p OMP_FOR. */
2262 find_combined_for (gimple_stmt_iterator
*gsi_p
,
2263 bool *handled_ops_p
,
2264 struct walk_stmt_info
*wi
)
2266 gimple stmt
= gsi_stmt (*gsi_p
);
2268 *handled_ops_p
= true;
2269 switch (gimple_code (stmt
))
2273 case GIMPLE_OMP_FOR
:
2274 if (gimple_omp_for_combined_into_p (stmt
)
2275 && gimple_omp_for_kind (stmt
) == GF_OMP_FOR_KIND_FOR
)
2278 return integer_zero_node
;
2287 /* Scan an OpenMP parallel directive. */
2290 scan_omp_parallel (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
2294 gomp_parallel
*stmt
= as_a
<gomp_parallel
*> (gsi_stmt (*gsi
));
2296 /* Ignore parallel directives with empty bodies, unless there
2297 are copyin clauses. */
2299 && empty_body_p (gimple_omp_body (stmt
))
2300 && find_omp_clause (gimple_omp_parallel_clauses (stmt
),
2301 OMP_CLAUSE_COPYIN
) == NULL
)
2303 gsi_replace (gsi
, gimple_build_nop (), false);
2307 if (gimple_omp_parallel_combined_p (stmt
))
2309 struct walk_stmt_info wi
;
2311 memset (&wi
, 0, sizeof (wi
));
2313 walk_gimple_seq (gimple_omp_body (stmt
),
2314 find_combined_for
, NULL
, &wi
);
2317 gomp_for
*for_stmt
= as_a
<gomp_for
*> ((gimple
) wi
.info
);
2318 struct omp_for_data fd
;
2319 extract_omp_for_data (for_stmt
, &fd
, NULL
);
2320 /* We need two temporaries with fd.loop.v type (istart/iend)
2321 and then (fd.collapse - 1) temporaries with the same
2322 type for count2 ... countN-1 vars if not constant. */
2323 size_t count
= 2, i
;
2324 tree type
= fd
.iter_type
;
2326 && TREE_CODE (fd
.loop
.n2
) != INTEGER_CST
)
2327 count
+= fd
.collapse
- 1;
2328 for (i
= 0; i
< count
; i
++)
2330 tree temp
= create_tmp_var (type
);
2331 tree c
= build_omp_clause (UNKNOWN_LOCATION
,
2332 OMP_CLAUSE__LOOPTEMP_
);
2333 insert_decl_map (&outer_ctx
->cb
, temp
, temp
);
2334 OMP_CLAUSE_DECL (c
) = temp
;
2335 OMP_CLAUSE_CHAIN (c
) = gimple_omp_parallel_clauses (stmt
);
2336 gimple_omp_parallel_set_clauses (stmt
, c
);
2341 ctx
= new_omp_context (stmt
, outer_ctx
);
2342 taskreg_contexts
.safe_push (ctx
);
2343 if (taskreg_nesting_level
> 1)
2344 ctx
->is_nested
= true;
2345 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
2346 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
2347 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
2348 name
= create_tmp_var_name (".omp_data_s");
2349 name
= build_decl (gimple_location (stmt
),
2350 TYPE_DECL
, name
, ctx
->record_type
);
2351 DECL_ARTIFICIAL (name
) = 1;
2352 DECL_NAMELESS (name
) = 1;
2353 TYPE_NAME (ctx
->record_type
) = name
;
2354 TYPE_ARTIFICIAL (ctx
->record_type
) = 1;
2355 create_omp_child_function (ctx
, false);
2356 gimple_omp_parallel_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
2358 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt
), ctx
);
2359 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2361 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
2362 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
2365 /* Scan an OpenMP task directive. */
2368 scan_omp_task (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
2372 gomp_task
*stmt
= as_a
<gomp_task
*> (gsi_stmt (*gsi
));
2374 /* Ignore task directives with empty bodies. */
2376 && empty_body_p (gimple_omp_body (stmt
)))
2378 gsi_replace (gsi
, gimple_build_nop (), false);
2382 ctx
= new_omp_context (stmt
, outer_ctx
);
2383 taskreg_contexts
.safe_push (ctx
);
2384 if (taskreg_nesting_level
> 1)
2385 ctx
->is_nested
= true;
2386 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
2387 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
2388 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
2389 name
= create_tmp_var_name (".omp_data_s");
2390 name
= build_decl (gimple_location (stmt
),
2391 TYPE_DECL
, name
, ctx
->record_type
);
2392 DECL_ARTIFICIAL (name
) = 1;
2393 DECL_NAMELESS (name
) = 1;
2394 TYPE_NAME (ctx
->record_type
) = name
;
2395 TYPE_ARTIFICIAL (ctx
->record_type
) = 1;
2396 create_omp_child_function (ctx
, false);
2397 gimple_omp_task_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
2399 scan_sharing_clauses (gimple_omp_task_clauses (stmt
), ctx
);
2401 if (ctx
->srecord_type
)
2403 name
= create_tmp_var_name (".omp_data_a");
2404 name
= build_decl (gimple_location (stmt
),
2405 TYPE_DECL
, name
, ctx
->srecord_type
);
2406 DECL_ARTIFICIAL (name
) = 1;
2407 DECL_NAMELESS (name
) = 1;
2408 TYPE_NAME (ctx
->srecord_type
) = name
;
2409 TYPE_ARTIFICIAL (ctx
->srecord_type
) = 1;
2410 create_omp_child_function (ctx
, true);
2413 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2415 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
2417 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
2418 t
= build_int_cst (long_integer_type_node
, 0);
2419 gimple_omp_task_set_arg_size (stmt
, t
);
2420 t
= build_int_cst (long_integer_type_node
, 1);
2421 gimple_omp_task_set_arg_align (stmt
, t
);
2426 /* If any decls have been made addressable during scan_omp,
2427 adjust their fields if needed, and layout record types
2428 of parallel/task constructs. */
2431 finish_taskreg_scan (omp_context
*ctx
)
2433 if (ctx
->record_type
== NULL_TREE
)
2436 /* If any task_shared_vars were needed, verify all
2437 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2438 statements if use_pointer_for_field hasn't changed
2439 because of that. If it did, update field types now. */
2440 if (task_shared_vars
)
2444 for (c
= gimple_omp_taskreg_clauses (ctx
->stmt
);
2445 c
; c
= OMP_CLAUSE_CHAIN (c
))
2446 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_SHARED
)
2448 tree decl
= OMP_CLAUSE_DECL (c
);
2450 /* Global variables don't need to be copied,
2451 the receiver side will use them directly. */
2452 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
)))
2454 if (!bitmap_bit_p (task_shared_vars
, DECL_UID (decl
))
2455 || !use_pointer_for_field (decl
, ctx
))
2457 tree field
= lookup_field (decl
, ctx
);
2458 if (TREE_CODE (TREE_TYPE (field
)) == POINTER_TYPE
2459 && TREE_TYPE (TREE_TYPE (field
)) == TREE_TYPE (decl
))
2461 TREE_TYPE (field
) = build_pointer_type (TREE_TYPE (decl
));
2462 TREE_THIS_VOLATILE (field
) = 0;
2463 DECL_USER_ALIGN (field
) = 0;
2464 DECL_ALIGN (field
) = TYPE_ALIGN (TREE_TYPE (field
));
2465 if (TYPE_ALIGN (ctx
->record_type
) < DECL_ALIGN (field
))
2466 TYPE_ALIGN (ctx
->record_type
) = DECL_ALIGN (field
);
2467 if (ctx
->srecord_type
)
2469 tree sfield
= lookup_sfield (decl
, ctx
);
2470 TREE_TYPE (sfield
) = TREE_TYPE (field
);
2471 TREE_THIS_VOLATILE (sfield
) = 0;
2472 DECL_USER_ALIGN (sfield
) = 0;
2473 DECL_ALIGN (sfield
) = DECL_ALIGN (field
);
2474 if (TYPE_ALIGN (ctx
->srecord_type
) < DECL_ALIGN (sfield
))
2475 TYPE_ALIGN (ctx
->srecord_type
) = DECL_ALIGN (sfield
);
2480 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_PARALLEL
)
2482 layout_type (ctx
->record_type
);
2483 fixup_child_record_type (ctx
);
2487 location_t loc
= gimple_location (ctx
->stmt
);
2488 tree
*p
, vla_fields
= NULL_TREE
, *q
= &vla_fields
;
2489 /* Move VLA fields to the end. */
2490 p
= &TYPE_FIELDS (ctx
->record_type
);
2492 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p
))
2493 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p
))))
2496 *p
= TREE_CHAIN (*p
);
2497 TREE_CHAIN (*q
) = NULL_TREE
;
2498 q
= &TREE_CHAIN (*q
);
2501 p
= &DECL_CHAIN (*p
);
2503 layout_type (ctx
->record_type
);
2504 fixup_child_record_type (ctx
);
2505 if (ctx
->srecord_type
)
2506 layout_type (ctx
->srecord_type
);
2507 tree t
= fold_convert_loc (loc
, long_integer_type_node
,
2508 TYPE_SIZE_UNIT (ctx
->record_type
));
2509 gimple_omp_task_set_arg_size (ctx
->stmt
, t
);
2510 t
= build_int_cst (long_integer_type_node
,
2511 TYPE_ALIGN_UNIT (ctx
->record_type
));
2512 gimple_omp_task_set_arg_align (ctx
->stmt
, t
);
2517 static omp_context
*
2518 enclosing_target_ctx (omp_context
*ctx
)
2521 && gimple_code (ctx
->stmt
) != GIMPLE_OMP_TARGET
)
2523 gcc_assert (ctx
!= NULL
);
2528 oacc_loop_or_target_p (gimple stmt
)
2530 enum gimple_code outer_type
= gimple_code (stmt
);
2531 return ((outer_type
== GIMPLE_OMP_TARGET
2532 && ((gimple_omp_target_kind (stmt
)
2533 == GF_OMP_TARGET_KIND_OACC_PARALLEL
)
2534 || (gimple_omp_target_kind (stmt
)
2535 == GF_OMP_TARGET_KIND_OACC_KERNELS
)))
2536 || (outer_type
== GIMPLE_OMP_FOR
2537 && gimple_omp_for_kind (stmt
) == GF_OMP_FOR_KIND_OACC_LOOP
));
2540 /* Scan a GIMPLE_OMP_FOR. */
2543 scan_omp_for (gomp_for
*stmt
, omp_context
*outer_ctx
)
2545 enum gimple_code outer_type
= GIMPLE_ERROR_MARK
;
2548 tree clauses
= gimple_omp_for_clauses (stmt
);
2551 outer_type
= gimple_code (outer_ctx
->stmt
);
2553 ctx
= new_omp_context (stmt
, outer_ctx
);
2555 if (is_gimple_omp_oacc (stmt
))
2557 if (outer_ctx
&& outer_type
== GIMPLE_OMP_FOR
)
2558 ctx
->gwv_this
= outer_ctx
->gwv_this
;
2559 for (tree c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2562 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_GANG
)
2564 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_WORKER
)
2566 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_VECTOR
)
2570 ctx
->gwv_this
|= val
;
2573 /* Skip; not nested inside a region. */
2576 if (!oacc_loop_or_target_p (outer_ctx
->stmt
))
2578 /* Skip; not nested inside an OpenACC region. */
2581 if (outer_type
== GIMPLE_OMP_FOR
)
2582 outer_ctx
->gwv_below
|= val
;
2583 if (OMP_CLAUSE_OPERAND (c
, 0) != NULL_TREE
)
2585 omp_context
*enclosing
= enclosing_target_ctx (outer_ctx
);
2586 if (gimple_omp_target_kind (enclosing
->stmt
)
2587 == GF_OMP_TARGET_KIND_OACC_PARALLEL
)
2588 error_at (gimple_location (stmt
),
2589 "no arguments allowed to gang, worker and vector clauses inside parallel");
2594 scan_sharing_clauses (clauses
, ctx
);
2596 scan_omp (gimple_omp_for_pre_body_ptr (stmt
), ctx
);
2597 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
2599 scan_omp_op (gimple_omp_for_index_ptr (stmt
, i
), ctx
);
2600 scan_omp_op (gimple_omp_for_initial_ptr (stmt
, i
), ctx
);
2601 scan_omp_op (gimple_omp_for_final_ptr (stmt
, i
), ctx
);
2602 scan_omp_op (gimple_omp_for_incr_ptr (stmt
, i
), ctx
);
2604 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2606 if (is_gimple_omp_oacc (stmt
))
2608 if (ctx
->gwv_this
& ctx
->gwv_below
)
2609 error_at (gimple_location (stmt
),
2610 "gang, worker and vector may occur only once in a loop nest");
2611 else if (ctx
->gwv_below
!= 0
2612 && ctx
->gwv_this
> ctx
->gwv_below
)
2613 error_at (gimple_location (stmt
),
2614 "gang, worker and vector must occur in this order in a loop nest");
2615 if (outer_ctx
&& outer_type
== GIMPLE_OMP_FOR
)
2616 outer_ctx
->gwv_below
|= ctx
->gwv_below
;
2620 /* Scan an OpenMP sections directive. */
2623 scan_omp_sections (gomp_sections
*stmt
, omp_context
*outer_ctx
)
2627 ctx
= new_omp_context (stmt
, outer_ctx
);
2628 scan_sharing_clauses (gimple_omp_sections_clauses (stmt
), ctx
);
2629 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2632 /* Scan an OpenMP single directive. */
2635 scan_omp_single (gomp_single
*stmt
, omp_context
*outer_ctx
)
2640 ctx
= new_omp_context (stmt
, outer_ctx
);
2641 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
2642 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
2643 name
= create_tmp_var_name (".omp_copy_s");
2644 name
= build_decl (gimple_location (stmt
),
2645 TYPE_DECL
, name
, ctx
->record_type
);
2646 TYPE_NAME (ctx
->record_type
) = name
;
2648 scan_sharing_clauses (gimple_omp_single_clauses (stmt
), ctx
);
2649 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2651 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
2652 ctx
->record_type
= NULL
;
2654 layout_type (ctx
->record_type
);
2657 /* Scan a GIMPLE_OMP_TARGET. */
2660 scan_omp_target (gomp_target
*stmt
, omp_context
*outer_ctx
)
2664 bool offloaded
= is_gimple_omp_offloaded (stmt
);
2665 tree clauses
= gimple_omp_target_clauses (stmt
);
2667 ctx
= new_omp_context (stmt
, outer_ctx
);
2668 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
2669 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
2670 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
2671 name
= create_tmp_var_name (".omp_data_t");
2672 name
= build_decl (gimple_location (stmt
),
2673 TYPE_DECL
, name
, ctx
->record_type
);
2674 DECL_ARTIFICIAL (name
) = 1;
2675 DECL_NAMELESS (name
) = 1;
2676 TYPE_NAME (ctx
->record_type
) = name
;
2677 TYPE_ARTIFICIAL (ctx
->record_type
) = 1;
2680 if (is_gimple_omp_oacc (stmt
))
2681 ctx
->reduction_map
= splay_tree_new (splay_tree_compare_pointers
,
2684 create_omp_child_function (ctx
, false);
2685 gimple_omp_target_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
2688 if (is_gimple_omp_oacc (stmt
))
2690 for (tree c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2692 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_NUM_GANGS
)
2693 ctx
->gwv_this
|= MASK_GANG
;
2694 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_NUM_WORKERS
)
2695 ctx
->gwv_this
|= MASK_WORKER
;
2696 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_VECTOR_LENGTH
)
2697 ctx
->gwv_this
|= MASK_VECTOR
;
2701 scan_sharing_clauses (clauses
, ctx
);
2702 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2704 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
2705 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
2708 TYPE_FIELDS (ctx
->record_type
)
2709 = nreverse (TYPE_FIELDS (ctx
->record_type
));
2710 #ifdef ENABLE_CHECKING
2712 unsigned int align
= DECL_ALIGN (TYPE_FIELDS (ctx
->record_type
));
2713 for (field
= TYPE_FIELDS (ctx
->record_type
);
2715 field
= DECL_CHAIN (field
))
2716 gcc_assert (DECL_ALIGN (field
) == align
);
2718 layout_type (ctx
->record_type
);
2720 fixup_child_record_type (ctx
);
2724 /* Scan an OpenMP teams directive. */
2727 scan_omp_teams (gomp_teams
*stmt
, omp_context
*outer_ctx
)
2729 omp_context
*ctx
= new_omp_context (stmt
, outer_ctx
);
2730 scan_sharing_clauses (gimple_omp_teams_clauses (stmt
), ctx
);
2731 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
2734 /* Check nesting restrictions. */
2736 check_omp_nesting_restrictions (gimple stmt
, omp_context
*ctx
)
2738 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
2739 inside an OpenACC CTX. */
2740 if (!(is_gimple_omp (stmt
)
2741 && is_gimple_omp_oacc (stmt
)))
2743 for (omp_context
*ctx_
= ctx
; ctx_
!= NULL
; ctx_
= ctx_
->outer
)
2744 if (is_gimple_omp (ctx_
->stmt
)
2745 && is_gimple_omp_oacc (ctx_
->stmt
))
2747 error_at (gimple_location (stmt
),
2748 "non-OpenACC construct inside of OpenACC region");
2755 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
2756 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
2758 error_at (gimple_location (stmt
),
2759 "OpenMP constructs may not be nested inside simd region");
2762 else if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
2764 if ((gimple_code (stmt
) != GIMPLE_OMP_FOR
2765 || (gimple_omp_for_kind (stmt
)
2766 != GF_OMP_FOR_KIND_DISTRIBUTE
))
2767 && gimple_code (stmt
) != GIMPLE_OMP_PARALLEL
)
2769 error_at (gimple_location (stmt
),
2770 "only distribute or parallel constructs are allowed to "
2771 "be closely nested inside teams construct");
2776 switch (gimple_code (stmt
))
2778 case GIMPLE_OMP_FOR
:
2779 if (gimple_omp_for_kind (stmt
) & GF_OMP_FOR_SIMD
)
2781 if (gimple_omp_for_kind (stmt
) == GF_OMP_FOR_KIND_DISTRIBUTE
)
2783 if (ctx
!= NULL
&& gimple_code (ctx
->stmt
) != GIMPLE_OMP_TEAMS
)
2785 error_at (gimple_location (stmt
),
2786 "distribute construct must be closely nested inside "
2794 if (is_gimple_call (stmt
)
2795 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2796 == BUILT_IN_GOMP_CANCEL
2797 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2798 == BUILT_IN_GOMP_CANCELLATION_POINT
))
2800 const char *bad
= NULL
;
2801 const char *kind
= NULL
;
2804 error_at (gimple_location (stmt
), "orphaned %qs construct",
2805 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2806 == BUILT_IN_GOMP_CANCEL
2807 ? "#pragma omp cancel"
2808 : "#pragma omp cancellation point");
2811 switch (tree_fits_shwi_p (gimple_call_arg (stmt
, 0))
2812 ? tree_to_shwi (gimple_call_arg (stmt
, 0))
2816 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_PARALLEL
)
2817 bad
= "#pragma omp parallel";
2818 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2819 == BUILT_IN_GOMP_CANCEL
2820 && !integer_zerop (gimple_call_arg (stmt
, 1)))
2821 ctx
->cancellable
= true;
2825 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_FOR
2826 || gimple_omp_for_kind (ctx
->stmt
) != GF_OMP_FOR_KIND_FOR
)
2827 bad
= "#pragma omp for";
2828 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2829 == BUILT_IN_GOMP_CANCEL
2830 && !integer_zerop (gimple_call_arg (stmt
, 1)))
2832 ctx
->cancellable
= true;
2833 if (find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2835 warning_at (gimple_location (stmt
), 0,
2836 "%<#pragma omp cancel for%> inside "
2837 "%<nowait%> for construct");
2838 if (find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2839 OMP_CLAUSE_ORDERED
))
2840 warning_at (gimple_location (stmt
), 0,
2841 "%<#pragma omp cancel for%> inside "
2842 "%<ordered%> for construct");
2847 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_SECTIONS
2848 && gimple_code (ctx
->stmt
) != GIMPLE_OMP_SECTION
)
2849 bad
= "#pragma omp sections";
2850 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2851 == BUILT_IN_GOMP_CANCEL
2852 && !integer_zerop (gimple_call_arg (stmt
, 1)))
2854 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_SECTIONS
)
2856 ctx
->cancellable
= true;
2857 if (find_omp_clause (gimple_omp_sections_clauses
2860 warning_at (gimple_location (stmt
), 0,
2861 "%<#pragma omp cancel sections%> inside "
2862 "%<nowait%> sections construct");
2866 gcc_assert (ctx
->outer
2867 && gimple_code (ctx
->outer
->stmt
)
2868 == GIMPLE_OMP_SECTIONS
);
2869 ctx
->outer
->cancellable
= true;
2870 if (find_omp_clause (gimple_omp_sections_clauses
2873 warning_at (gimple_location (stmt
), 0,
2874 "%<#pragma omp cancel sections%> inside "
2875 "%<nowait%> sections construct");
2881 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_TASK
)
2882 bad
= "#pragma omp task";
2884 ctx
->cancellable
= true;
2888 error_at (gimple_location (stmt
), "invalid arguments");
2893 error_at (gimple_location (stmt
),
2894 "%<%s %s%> construct not closely nested inside of %qs",
2895 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2896 == BUILT_IN_GOMP_CANCEL
2897 ? "#pragma omp cancel"
2898 : "#pragma omp cancellation point", kind
, bad
);
2903 case GIMPLE_OMP_SECTIONS
:
2904 case GIMPLE_OMP_SINGLE
:
2905 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2906 switch (gimple_code (ctx
->stmt
))
2908 case GIMPLE_OMP_FOR
:
2909 case GIMPLE_OMP_SECTIONS
:
2910 case GIMPLE_OMP_SINGLE
:
2911 case GIMPLE_OMP_ORDERED
:
2912 case GIMPLE_OMP_MASTER
:
2913 case GIMPLE_OMP_TASK
:
2914 case GIMPLE_OMP_CRITICAL
:
2915 if (is_gimple_call (stmt
))
2917 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2918 != BUILT_IN_GOMP_BARRIER
)
2920 error_at (gimple_location (stmt
),
2921 "barrier region may not be closely nested inside "
2922 "of work-sharing, critical, ordered, master or "
2923 "explicit task region");
2926 error_at (gimple_location (stmt
),
2927 "work-sharing region may not be closely nested inside "
2928 "of work-sharing, critical, ordered, master or explicit "
2931 case GIMPLE_OMP_PARALLEL
:
2937 case GIMPLE_OMP_MASTER
:
2938 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2939 switch (gimple_code (ctx
->stmt
))
2941 case GIMPLE_OMP_FOR
:
2942 case GIMPLE_OMP_SECTIONS
:
2943 case GIMPLE_OMP_SINGLE
:
2944 case GIMPLE_OMP_TASK
:
2945 error_at (gimple_location (stmt
),
2946 "master region may not be closely nested inside "
2947 "of work-sharing or explicit task region");
2949 case GIMPLE_OMP_PARALLEL
:
2955 case GIMPLE_OMP_ORDERED
:
2956 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2957 switch (gimple_code (ctx
->stmt
))
2959 case GIMPLE_OMP_CRITICAL
:
2960 case GIMPLE_OMP_TASK
:
2961 error_at (gimple_location (stmt
),
2962 "ordered region may not be closely nested inside "
2963 "of critical or explicit task region");
2965 case GIMPLE_OMP_FOR
:
2966 if (find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2967 OMP_CLAUSE_ORDERED
) == NULL
)
2969 error_at (gimple_location (stmt
),
2970 "ordered region must be closely nested inside "
2971 "a loop region with an ordered clause");
2975 case GIMPLE_OMP_PARALLEL
:
2976 error_at (gimple_location (stmt
),
2977 "ordered region must be closely nested inside "
2978 "a loop region with an ordered clause");
2984 case GIMPLE_OMP_CRITICAL
:
2987 = gimple_omp_critical_name (as_a
<gomp_critical
*> (stmt
));
2988 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2989 if (gomp_critical
*other_crit
2990 = dyn_cast
<gomp_critical
*> (ctx
->stmt
))
2991 if (this_stmt_name
== gimple_omp_critical_name (other_crit
))
2993 error_at (gimple_location (stmt
),
2994 "critical region may not be nested inside a critical "
2995 "region with the same name");
3000 case GIMPLE_OMP_TEAMS
:
3002 || gimple_code (ctx
->stmt
) != GIMPLE_OMP_TARGET
3003 || gimple_omp_target_kind (ctx
->stmt
) != GF_OMP_TARGET_KIND_REGION
)
3005 error_at (gimple_location (stmt
),
3006 "teams construct not closely nested inside of target "
3011 case GIMPLE_OMP_TARGET
:
3012 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
3014 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_TARGET
)
3016 if (is_gimple_omp (stmt
)
3017 && is_gimple_omp_oacc (stmt
)
3018 && is_gimple_omp (ctx
->stmt
))
3020 error_at (gimple_location (stmt
),
3021 "OpenACC construct inside of non-OpenACC region");
3027 const char *stmt_name
, *ctx_stmt_name
;
3028 switch (gimple_omp_target_kind (stmt
))
3030 case GF_OMP_TARGET_KIND_REGION
: stmt_name
= "target"; break;
3031 case GF_OMP_TARGET_KIND_DATA
: stmt_name
= "target data"; break;
3032 case GF_OMP_TARGET_KIND_UPDATE
: stmt_name
= "target update"; break;
3033 case GF_OMP_TARGET_KIND_OACC_PARALLEL
: stmt_name
= "parallel"; break;
3034 case GF_OMP_TARGET_KIND_OACC_KERNELS
: stmt_name
= "kernels"; break;
3035 case GF_OMP_TARGET_KIND_OACC_DATA
: stmt_name
= "data"; break;
3036 case GF_OMP_TARGET_KIND_OACC_UPDATE
: stmt_name
= "update"; break;
3037 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
: stmt_name
= "enter/exit data"; break;
3038 default: gcc_unreachable ();
3040 switch (gimple_omp_target_kind (ctx
->stmt
))
3042 case GF_OMP_TARGET_KIND_REGION
: ctx_stmt_name
= "target"; break;
3043 case GF_OMP_TARGET_KIND_DATA
: ctx_stmt_name
= "target data"; break;
3044 case GF_OMP_TARGET_KIND_OACC_PARALLEL
: ctx_stmt_name
= "parallel"; break;
3045 case GF_OMP_TARGET_KIND_OACC_KERNELS
: ctx_stmt_name
= "kernels"; break;
3046 case GF_OMP_TARGET_KIND_OACC_DATA
: ctx_stmt_name
= "data"; break;
3047 default: gcc_unreachable ();
3050 /* OpenACC/OpenMP mismatch? */
3051 if (is_gimple_omp_oacc (stmt
)
3052 != is_gimple_omp_oacc (ctx
->stmt
))
3054 error_at (gimple_location (stmt
),
3055 "%s %s construct inside of %s %s region",
3056 (is_gimple_omp_oacc (stmt
)
3057 ? "OpenACC" : "OpenMP"), stmt_name
,
3058 (is_gimple_omp_oacc (ctx
->stmt
)
3059 ? "OpenACC" : "OpenMP"), ctx_stmt_name
);
3062 if (is_gimple_omp_offloaded (ctx
->stmt
))
3064 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3065 if (is_gimple_omp_oacc (ctx
->stmt
))
3067 error_at (gimple_location (stmt
),
3068 "%s construct inside of %s region",
3069 stmt_name
, ctx_stmt_name
);
3074 gcc_checking_assert (!is_gimple_omp_oacc (stmt
));
3075 warning_at (gimple_location (stmt
), 0,
3076 "%s construct inside of %s region",
3077 stmt_name
, ctx_stmt_name
);
3089 /* Helper function scan_omp.
3091 Callback for walk_tree or operators in walk_gimple_stmt used to
3092 scan for OMP directives in TP. */
3095 scan_omp_1_op (tree
*tp
, int *walk_subtrees
, void *data
)
3097 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
3098 omp_context
*ctx
= (omp_context
*) wi
->info
;
3101 switch (TREE_CODE (t
))
3108 *tp
= remap_decl (t
, &ctx
->cb
);
3112 if (ctx
&& TYPE_P (t
))
3113 *tp
= remap_type (t
, &ctx
->cb
);
3114 else if (!DECL_P (t
))
3119 tree tem
= remap_type (TREE_TYPE (t
), &ctx
->cb
);
3120 if (tem
!= TREE_TYPE (t
))
3122 if (TREE_CODE (t
) == INTEGER_CST
)
3123 *tp
= wide_int_to_tree (tem
, t
);
3125 TREE_TYPE (t
) = tem
;
3135 /* Return true if FNDECL is a setjmp or a longjmp. */
3138 setjmp_or_longjmp_p (const_tree fndecl
)
3140 if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
3141 && (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_SETJMP
3142 || DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_LONGJMP
))
3145 tree declname
= DECL_NAME (fndecl
);
3148 const char *name
= IDENTIFIER_POINTER (declname
);
3149 return !strcmp (name
, "setjmp") || !strcmp (name
, "longjmp");
3153 /* Helper function for scan_omp.
3155 Callback for walk_gimple_stmt used to scan for OMP directives in
3156 the current statement in GSI. */
3159 scan_omp_1_stmt (gimple_stmt_iterator
*gsi
, bool *handled_ops_p
,
3160 struct walk_stmt_info
*wi
)
3162 gimple stmt
= gsi_stmt (*gsi
);
3163 omp_context
*ctx
= (omp_context
*) wi
->info
;
3165 if (gimple_has_location (stmt
))
3166 input_location
= gimple_location (stmt
);
3168 /* Check the nesting restrictions. */
3169 bool remove
= false;
3170 if (is_gimple_omp (stmt
))
3171 remove
= !check_omp_nesting_restrictions (stmt
, ctx
);
3172 else if (is_gimple_call (stmt
))
3174 tree fndecl
= gimple_call_fndecl (stmt
);
3177 if (setjmp_or_longjmp_p (fndecl
)
3179 && gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
3180 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
3183 error_at (gimple_location (stmt
),
3184 "setjmp/longjmp inside simd construct");
3186 else if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
3187 switch (DECL_FUNCTION_CODE (fndecl
))
3189 case BUILT_IN_GOMP_BARRIER
:
3190 case BUILT_IN_GOMP_CANCEL
:
3191 case BUILT_IN_GOMP_CANCELLATION_POINT
:
3192 case BUILT_IN_GOMP_TASKYIELD
:
3193 case BUILT_IN_GOMP_TASKWAIT
:
3194 case BUILT_IN_GOMP_TASKGROUP_START
:
3195 case BUILT_IN_GOMP_TASKGROUP_END
:
3196 remove
= !check_omp_nesting_restrictions (stmt
, ctx
);
3205 stmt
= gimple_build_nop ();
3206 gsi_replace (gsi
, stmt
, false);
3209 *handled_ops_p
= true;
3211 switch (gimple_code (stmt
))
3213 case GIMPLE_OMP_PARALLEL
:
3214 taskreg_nesting_level
++;
3215 scan_omp_parallel (gsi
, ctx
);
3216 taskreg_nesting_level
--;
3219 case GIMPLE_OMP_TASK
:
3220 taskreg_nesting_level
++;
3221 scan_omp_task (gsi
, ctx
);
3222 taskreg_nesting_level
--;
3225 case GIMPLE_OMP_FOR
:
3226 scan_omp_for (as_a
<gomp_for
*> (stmt
), ctx
);
3229 case GIMPLE_OMP_SECTIONS
:
3230 scan_omp_sections (as_a
<gomp_sections
*> (stmt
), ctx
);
3233 case GIMPLE_OMP_SINGLE
:
3234 scan_omp_single (as_a
<gomp_single
*> (stmt
), ctx
);
3237 case GIMPLE_OMP_SECTION
:
3238 case GIMPLE_OMP_MASTER
:
3239 case GIMPLE_OMP_TASKGROUP
:
3240 case GIMPLE_OMP_ORDERED
:
3241 case GIMPLE_OMP_CRITICAL
:
3242 ctx
= new_omp_context (stmt
, ctx
);
3243 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
3246 case GIMPLE_OMP_TARGET
:
3247 scan_omp_target (as_a
<gomp_target
*> (stmt
), ctx
);
3250 case GIMPLE_OMP_TEAMS
:
3251 scan_omp_teams (as_a
<gomp_teams
*> (stmt
), ctx
);
3258 *handled_ops_p
= false;
3260 for (var
= gimple_bind_vars (as_a
<gbind
*> (stmt
));
3262 var
= DECL_CHAIN (var
))
3263 insert_decl_map (&ctx
->cb
, var
, var
);
3267 *handled_ops_p
= false;
3275 /* Scan all the statements starting at the current statement. CTX
3276 contains context information about the OMP directives and
3277 clauses found during the scan. */
3280 scan_omp (gimple_seq
*body_p
, omp_context
*ctx
)
3282 location_t saved_location
;
3283 struct walk_stmt_info wi
;
3285 memset (&wi
, 0, sizeof (wi
));
3287 wi
.want_locations
= true;
3289 saved_location
= input_location
;
3290 walk_gimple_seq_mod (body_p
, scan_omp_1_stmt
, scan_omp_1_op
, &wi
);
3291 input_location
= saved_location
;
3294 /* Re-gimplification and code generation routines. */
3296 /* Build a call to GOMP_barrier. */
3299 build_omp_barrier (tree lhs
)
3301 tree fndecl
= builtin_decl_explicit (lhs
? BUILT_IN_GOMP_BARRIER_CANCEL
3302 : BUILT_IN_GOMP_BARRIER
);
3303 gcall
*g
= gimple_build_call (fndecl
, 0);
3305 gimple_call_set_lhs (g
, lhs
);
3309 /* If a context was created for STMT when it was scanned, return it. */
3311 static omp_context
*
3312 maybe_lookup_ctx (gimple stmt
)
3315 n
= splay_tree_lookup (all_contexts
, (splay_tree_key
) stmt
);
3316 return n
? (omp_context
*) n
->value
: NULL
;
3320 /* Find the mapping for DECL in CTX or the immediately enclosing
3321 context that has a mapping for DECL.
3323 If CTX is a nested parallel directive, we may have to use the decl
3324 mappings created in CTX's parent context. Suppose that we have the
3325 following parallel nesting (variable UIDs showed for clarity):
3328 #omp parallel shared(iD.1562) -> outer parallel
3329 iD.1562 = iD.1562 + 1;
3331 #omp parallel shared (iD.1562) -> inner parallel
3332 iD.1562 = iD.1562 - 1;
3334 Each parallel structure will create a distinct .omp_data_s structure
3335 for copying iD.1562 in/out of the directive:
3337 outer parallel .omp_data_s.1.i -> iD.1562
3338 inner parallel .omp_data_s.2.i -> iD.1562
3340 A shared variable mapping will produce a copy-out operation before
3341 the parallel directive and a copy-in operation after it. So, in
3342 this case we would have:
3345 .omp_data_o.1.i = iD.1562;
3346 #omp parallel shared(iD.1562) -> outer parallel
3347 .omp_data_i.1 = &.omp_data_o.1
3348 .omp_data_i.1->i = .omp_data_i.1->i + 1;
3350 .omp_data_o.2.i = iD.1562; -> **
3351 #omp parallel shared(iD.1562) -> inner parallel
3352 .omp_data_i.2 = &.omp_data_o.2
3353 .omp_data_i.2->i = .omp_data_i.2->i - 1;
3356 ** This is a problem. The symbol iD.1562 cannot be referenced
3357 inside the body of the outer parallel region. But since we are
3358 emitting this copy operation while expanding the inner parallel
3359 directive, we need to access the CTX structure of the outer
3360 parallel directive to get the correct mapping:
3362 .omp_data_o.2.i = .omp_data_i.1->i
3364 Since there may be other workshare or parallel directives enclosing
3365 the parallel directive, it may be necessary to walk up the context
3366 parent chain. This is not a problem in general because nested
3367 parallelism happens only rarely. */
3370 lookup_decl_in_outer_ctx (tree decl
, omp_context
*ctx
)
3375 for (up
= ctx
->outer
, t
= NULL
; up
&& t
== NULL
; up
= up
->outer
)
3376 t
= maybe_lookup_decl (decl
, up
);
3378 gcc_assert (!ctx
->is_nested
|| t
|| is_global_var (decl
));
3380 return t
? t
: decl
;
3384 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
3385 in outer contexts. */
3388 maybe_lookup_decl_in_outer_ctx (tree decl
, omp_context
*ctx
)
3393 for (up
= ctx
->outer
, t
= NULL
; up
&& t
== NULL
; up
= up
->outer
)
3394 t
= maybe_lookup_decl (decl
, up
);
3396 return t
? t
: decl
;
3400 /* Construct the initialization value for reduction CLAUSE. */
3403 omp_reduction_init (tree clause
, tree type
)
3405 location_t loc
= OMP_CLAUSE_LOCATION (clause
);
3406 switch (OMP_CLAUSE_REDUCTION_CODE (clause
))
3413 case TRUTH_ORIF_EXPR
:
3414 case TRUTH_XOR_EXPR
:
3416 return build_zero_cst (type
);
3419 case TRUTH_AND_EXPR
:
3420 case TRUTH_ANDIF_EXPR
:
3422 return fold_convert_loc (loc
, type
, integer_one_node
);
3425 return fold_convert_loc (loc
, type
, integer_minus_one_node
);
3428 if (SCALAR_FLOAT_TYPE_P (type
))
3430 REAL_VALUE_TYPE max
, min
;
3431 if (HONOR_INFINITIES (type
))
3434 real_arithmetic (&min
, NEGATE_EXPR
, &max
, NULL
);
3437 real_maxval (&min
, 1, TYPE_MODE (type
));
3438 return build_real (type
, min
);
3442 gcc_assert (INTEGRAL_TYPE_P (type
));
3443 return TYPE_MIN_VALUE (type
);
3447 if (SCALAR_FLOAT_TYPE_P (type
))
3449 REAL_VALUE_TYPE max
;
3450 if (HONOR_INFINITIES (type
))
3453 real_maxval (&max
, 0, TYPE_MODE (type
));
3454 return build_real (type
, max
);
3458 gcc_assert (INTEGRAL_TYPE_P (type
));
3459 return TYPE_MAX_VALUE (type
);
3467 /* Return alignment to be assumed for var in CLAUSE, which should be
3468 OMP_CLAUSE_ALIGNED. */
3471 omp_clause_aligned_alignment (tree clause
)
3473 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause
))
3474 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause
);
3476 /* Otherwise return implementation defined alignment. */
3477 unsigned int al
= 1;
3478 machine_mode mode
, vmode
;
3479 int vs
= targetm
.vectorize
.autovectorize_vector_sizes ();
3481 vs
= 1 << floor_log2 (vs
);
3482 static enum mode_class classes
[]
3483 = { MODE_INT
, MODE_VECTOR_INT
, MODE_FLOAT
, MODE_VECTOR_FLOAT
};
3484 for (int i
= 0; i
< 4; i
+= 2)
3485 for (mode
= GET_CLASS_NARROWEST_MODE (classes
[i
]);
3487 mode
= GET_MODE_WIDER_MODE (mode
))
3489 vmode
= targetm
.vectorize
.preferred_simd_mode (mode
);
3490 if (GET_MODE_CLASS (vmode
) != classes
[i
+ 1])
3493 && GET_MODE_SIZE (vmode
) < vs
3494 && GET_MODE_2XWIDER_MODE (vmode
) != VOIDmode
)
3495 vmode
= GET_MODE_2XWIDER_MODE (vmode
);
3497 tree type
= lang_hooks
.types
.type_for_mode (mode
, 1);
3498 if (type
== NULL_TREE
|| TYPE_MODE (type
) != mode
)
3500 type
= build_vector_type (type
, GET_MODE_SIZE (vmode
)
3501 / GET_MODE_SIZE (mode
));
3502 if (TYPE_MODE (type
) != vmode
)
3504 if (TYPE_ALIGN_UNIT (type
) > al
)
3505 al
= TYPE_ALIGN_UNIT (type
);
3507 return build_int_cst (integer_type_node
, al
);
3510 /* Return maximum possible vectorization factor for the target. */
3517 || !flag_tree_loop_optimize
3518 || (!flag_tree_loop_vectorize
3519 && (global_options_set
.x_flag_tree_loop_vectorize
3520 || global_options_set
.x_flag_tree_vectorize
)))
3523 int vs
= targetm
.vectorize
.autovectorize_vector_sizes ();
3526 vs
= 1 << floor_log2 (vs
);
3529 machine_mode vqimode
= targetm
.vectorize
.preferred_simd_mode (QImode
);
3530 if (GET_MODE_CLASS (vqimode
) == MODE_VECTOR_INT
)
3531 return GET_MODE_NUNITS (vqimode
);
3535 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3539 lower_rec_simd_input_clauses (tree new_var
, omp_context
*ctx
, int &max_vf
,
3540 tree
&idx
, tree
&lane
, tree
&ivar
, tree
&lvar
)
3544 max_vf
= omp_max_vf ();
3547 tree c
= find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
3548 OMP_CLAUSE_SAFELEN
);
3549 if (c
&& TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c
)) != INTEGER_CST
)
3551 else if (c
&& compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c
),
3553 max_vf
= tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c
));
3557 idx
= create_tmp_var (unsigned_type_node
);
3558 lane
= create_tmp_var (unsigned_type_node
);
3564 tree atype
= build_array_type_nelts (TREE_TYPE (new_var
), max_vf
);
3565 tree avar
= create_tmp_var_raw (atype
);
3566 if (TREE_ADDRESSABLE (new_var
))
3567 TREE_ADDRESSABLE (avar
) = 1;
3568 DECL_ATTRIBUTES (avar
)
3569 = tree_cons (get_identifier ("omp simd array"), NULL
,
3570 DECL_ATTRIBUTES (avar
));
3571 gimple_add_tmp_var (avar
);
3572 ivar
= build4 (ARRAY_REF
, TREE_TYPE (new_var
), avar
, idx
,
3573 NULL_TREE
, NULL_TREE
);
3574 lvar
= build4 (ARRAY_REF
, TREE_TYPE (new_var
), avar
, lane
,
3575 NULL_TREE
, NULL_TREE
);
3576 if (DECL_P (new_var
))
3578 SET_DECL_VALUE_EXPR (new_var
, lvar
);
3579 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3584 /* Helper function of lower_rec_input_clauses. For a reference
3585 in simd reduction, add an underlying variable it will reference. */
3588 handle_simd_reference (location_t loc
, tree new_vard
, gimple_seq
*ilist
)
3590 tree z
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard
)));
3591 if (TREE_CONSTANT (z
))
3593 const char *name
= NULL
;
3594 if (DECL_NAME (new_vard
))
3595 name
= IDENTIFIER_POINTER (DECL_NAME (new_vard
));
3597 z
= create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard
)), name
);
3598 gimple_add_tmp_var (z
);
3599 TREE_ADDRESSABLE (z
) = 1;
3600 z
= build_fold_addr_expr_loc (loc
, z
);
3601 gimplify_assign (new_vard
, z
, ilist
);
3605 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3606 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3607 private variables. Initialization statements go in ILIST, while calls
3608 to destructors go in DLIST. */
3611 lower_rec_input_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*dlist
,
3612 omp_context
*ctx
, struct omp_for_data
*fd
)
3614 tree c
, dtor
, copyin_seq
, x
, ptr
;
3615 bool copyin_by_ref
= false;
3616 bool lastprivate_firstprivate
= false;
3617 bool reduction_omp_orig_ref
= false;
3619 bool is_simd
= (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
3620 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
);
3622 tree lane
= NULL_TREE
, idx
= NULL_TREE
;
3623 tree ivar
= NULL_TREE
, lvar
= NULL_TREE
;
3624 gimple_seq llist
[2] = { NULL
, NULL
};
3628 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3629 with data sharing clauses referencing variable sized vars. That
3630 is unnecessarily hard to support and very unlikely to result in
3631 vectorized code anyway. */
3633 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
3634 switch (OMP_CLAUSE_CODE (c
))
3636 case OMP_CLAUSE_LINEAR
:
3637 if (OMP_CLAUSE_LINEAR_ARRAY (c
))
3640 case OMP_CLAUSE_REDUCTION
:
3641 case OMP_CLAUSE_PRIVATE
:
3642 case OMP_CLAUSE_FIRSTPRIVATE
:
3643 case OMP_CLAUSE_LASTPRIVATE
:
3644 if (is_variable_sized (OMP_CLAUSE_DECL (c
)))
3651 /* Do all the fixed sized types in the first pass, and the variable sized
3652 types in the second pass. This makes sure that the scalar arguments to
3653 the variable sized types are processed before we use them in the
3654 variable sized operations. */
3655 for (pass
= 0; pass
< 2; ++pass
)
3657 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
3659 enum omp_clause_code c_kind
= OMP_CLAUSE_CODE (c
);
3662 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
3666 case OMP_CLAUSE_PRIVATE
:
3667 if (OMP_CLAUSE_PRIVATE_DEBUG (c
))
3670 case OMP_CLAUSE_SHARED
:
3671 /* Ignore shared directives in teams construct. */
3672 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
3674 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c
), ctx
) == NULL
)
3676 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c
)));
3679 case OMP_CLAUSE_FIRSTPRIVATE
:
3680 case OMP_CLAUSE_COPYIN
:
3681 case OMP_CLAUSE_LINEAR
:
3683 case OMP_CLAUSE_REDUCTION
:
3684 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c
))
3685 reduction_omp_orig_ref
= true;
3687 case OMP_CLAUSE__LOOPTEMP_
:
3688 /* Handle _looptemp_ clauses only on parallel. */
3692 case OMP_CLAUSE_LASTPRIVATE
:
3693 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
3695 lastprivate_firstprivate
= true;
3699 /* Even without corresponding firstprivate, if
3700 decl is Fortran allocatable, it needs outer var
3703 && lang_hooks
.decls
.omp_private_outer_ref
3704 (OMP_CLAUSE_DECL (c
)))
3705 lastprivate_firstprivate
= true;
3707 case OMP_CLAUSE_ALIGNED
:
3710 var
= OMP_CLAUSE_DECL (c
);
3711 if (TREE_CODE (TREE_TYPE (var
)) == POINTER_TYPE
3712 && !is_global_var (var
))
3714 new_var
= maybe_lookup_decl (var
, ctx
);
3715 if (new_var
== NULL_TREE
)
3716 new_var
= maybe_lookup_decl_in_outer_ctx (var
, ctx
);
3717 x
= builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED
);
3718 x
= build_call_expr_loc (clause_loc
, x
, 2, new_var
,
3719 omp_clause_aligned_alignment (c
));
3720 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
3721 x
= build2 (MODIFY_EXPR
, TREE_TYPE (new_var
), new_var
, x
);
3722 gimplify_and_add (x
, ilist
);
3724 else if (TREE_CODE (TREE_TYPE (var
)) == ARRAY_TYPE
3725 && is_global_var (var
))
3727 tree ptype
= build_pointer_type (TREE_TYPE (var
)), t
, t2
;
3728 new_var
= lookup_decl (var
, ctx
);
3729 t
= maybe_lookup_decl_in_outer_ctx (var
, ctx
);
3730 t
= build_fold_addr_expr_loc (clause_loc
, t
);
3731 t2
= builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED
);
3732 t
= build_call_expr_loc (clause_loc
, t2
, 2, t
,
3733 omp_clause_aligned_alignment (c
));
3734 t
= fold_convert_loc (clause_loc
, ptype
, t
);
3735 x
= create_tmp_var (ptype
);
3736 t
= build2 (MODIFY_EXPR
, ptype
, x
, t
);
3737 gimplify_and_add (t
, ilist
);
3738 t
= build_simple_mem_ref_loc (clause_loc
, x
);
3739 SET_DECL_VALUE_EXPR (new_var
, t
);
3740 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3747 new_var
= var
= OMP_CLAUSE_DECL (c
);
3748 if (c_kind
!= OMP_CLAUSE_COPYIN
)
3749 new_var
= lookup_decl (var
, ctx
);
3751 if (c_kind
== OMP_CLAUSE_SHARED
|| c_kind
== OMP_CLAUSE_COPYIN
)
3756 else if (is_variable_sized (var
))
3758 /* For variable sized types, we need to allocate the
3759 actual storage here. Call alloca and store the
3760 result in the pointer decl that we created elsewhere. */
3764 if (c_kind
!= OMP_CLAUSE_FIRSTPRIVATE
|| !is_task_ctx (ctx
))
3769 ptr
= DECL_VALUE_EXPR (new_var
);
3770 gcc_assert (TREE_CODE (ptr
) == INDIRECT_REF
);
3771 ptr
= TREE_OPERAND (ptr
, 0);
3772 gcc_assert (DECL_P (ptr
));
3773 x
= TYPE_SIZE_UNIT (TREE_TYPE (new_var
));
3775 /* void *tmp = __builtin_alloca */
3776 atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
3777 stmt
= gimple_build_call (atmp
, 1, x
);
3778 tmp
= create_tmp_var_raw (ptr_type_node
);
3779 gimple_add_tmp_var (tmp
);
3780 gimple_call_set_lhs (stmt
, tmp
);
3782 gimple_seq_add_stmt (ilist
, stmt
);
3784 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ptr
), tmp
);
3785 gimplify_assign (ptr
, x
, ilist
);
3788 else if (is_reference (var
))
3790 /* For references that are being privatized for Fortran,
3791 allocate new backing storage for the new pointer
3792 variable. This allows us to avoid changing all the
3793 code that expects a pointer to something that expects
3794 a direct variable. */
3798 x
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var
)));
3799 if (c_kind
== OMP_CLAUSE_FIRSTPRIVATE
&& is_task_ctx (ctx
))
3801 x
= build_receiver_ref (var
, false, ctx
);
3802 x
= build_fold_addr_expr_loc (clause_loc
, x
);
3804 else if (TREE_CONSTANT (x
))
3806 /* For reduction in SIMD loop, defer adding the
3807 initialization of the reference, because if we decide
3808 to use SIMD array for it, the initilization could cause
3810 if (c_kind
== OMP_CLAUSE_REDUCTION
&& is_simd
)
3814 const char *name
= NULL
;
3815 if (DECL_NAME (var
))
3816 name
= IDENTIFIER_POINTER (DECL_NAME (new_var
));
3818 x
= create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var
)),
3820 gimple_add_tmp_var (x
);
3821 TREE_ADDRESSABLE (x
) = 1;
3822 x
= build_fold_addr_expr_loc (clause_loc
, x
);
3827 tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
3828 x
= build_call_expr_loc (clause_loc
, atmp
, 1, x
);
3833 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
3834 gimplify_assign (new_var
, x
, ilist
);
3837 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
3839 else if (c_kind
== OMP_CLAUSE_REDUCTION
3840 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3848 switch (OMP_CLAUSE_CODE (c
))
3850 case OMP_CLAUSE_SHARED
:
3851 /* Ignore shared directives in teams construct. */
3852 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
3854 /* Shared global vars are just accessed directly. */
3855 if (is_global_var (new_var
))
3857 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3858 needs to be delayed until after fixup_child_record_type so
3859 that we get the correct type during the dereference. */
3860 by_ref
= use_pointer_for_field (var
, ctx
);
3861 x
= build_receiver_ref (var
, by_ref
, ctx
);
3862 SET_DECL_VALUE_EXPR (new_var
, x
);
3863 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3865 /* ??? If VAR is not passed by reference, and the variable
3866 hasn't been initialized yet, then we'll get a warning for
3867 the store into the omp_data_s structure. Ideally, we'd be
3868 able to notice this and not store anything at all, but
3869 we're generating code too early. Suppress the warning. */
3871 TREE_NO_WARNING (var
) = 1;
3874 case OMP_CLAUSE_LASTPRIVATE
:
3875 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
3879 case OMP_CLAUSE_PRIVATE
:
3880 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_PRIVATE
)
3881 x
= build_outer_var_ref (var
, ctx
);
3882 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
3884 if (is_task_ctx (ctx
))
3885 x
= build_receiver_ref (var
, false, ctx
);
3887 x
= build_outer_var_ref (var
, ctx
);
3893 nx
= lang_hooks
.decls
.omp_clause_default_ctor (c
, new_var
, x
);
3896 tree y
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
3897 if ((TREE_ADDRESSABLE (new_var
) || nx
|| y
3898 || OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
)
3899 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
3900 idx
, lane
, ivar
, lvar
))
3903 x
= lang_hooks
.decls
.omp_clause_default_ctor
3904 (c
, unshare_expr (ivar
), x
);
3906 gimplify_and_add (x
, &llist
[0]);
3909 y
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
3912 gimple_seq tseq
= NULL
;
3915 gimplify_stmt (&dtor
, &tseq
);
3916 gimple_seq_add_seq (&llist
[1], tseq
);
3923 gimplify_and_add (nx
, ilist
);
3927 x
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
3930 gimple_seq tseq
= NULL
;
3933 gimplify_stmt (&dtor
, &tseq
);
3934 gimple_seq_add_seq (dlist
, tseq
);
3938 case OMP_CLAUSE_LINEAR
:
3939 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c
))
3940 goto do_firstprivate
;
3941 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c
))
3944 x
= build_outer_var_ref (var
, ctx
);
3947 case OMP_CLAUSE_FIRSTPRIVATE
:
3948 if (is_task_ctx (ctx
))
3950 if (is_reference (var
) || is_variable_sized (var
))
3952 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var
,
3954 || use_pointer_for_field (var
, NULL
))
3956 x
= build_receiver_ref (var
, false, ctx
);
3957 SET_DECL_VALUE_EXPR (new_var
, x
);
3958 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3963 x
= build_outer_var_ref (var
, ctx
);
3966 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
3967 && gimple_omp_for_combined_into_p (ctx
->stmt
))
3969 tree t
= OMP_CLAUSE_LINEAR_STEP (c
);
3970 tree stept
= TREE_TYPE (t
);
3971 tree ct
= find_omp_clause (clauses
,
3972 OMP_CLAUSE__LOOPTEMP_
);
3974 tree l
= OMP_CLAUSE_DECL (ct
);
3975 tree n1
= fd
->loop
.n1
;
3976 tree step
= fd
->loop
.step
;
3977 tree itype
= TREE_TYPE (l
);
3978 if (POINTER_TYPE_P (itype
))
3979 itype
= signed_type_for (itype
);
3980 l
= fold_build2 (MINUS_EXPR
, itype
, l
, n1
);
3981 if (TYPE_UNSIGNED (itype
)
3982 && fd
->loop
.cond_code
== GT_EXPR
)
3983 l
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
3984 fold_build1 (NEGATE_EXPR
, itype
, l
),
3985 fold_build1 (NEGATE_EXPR
,
3988 l
= fold_build2 (TRUNC_DIV_EXPR
, itype
, l
, step
);
3989 t
= fold_build2 (MULT_EXPR
, stept
,
3990 fold_convert (stept
, l
), t
);
3992 if (OMP_CLAUSE_LINEAR_ARRAY (c
))
3994 x
= lang_hooks
.decls
.omp_clause_linear_ctor
3996 gimplify_and_add (x
, ilist
);
4000 if (POINTER_TYPE_P (TREE_TYPE (x
)))
4001 x
= fold_build2 (POINTER_PLUS_EXPR
,
4002 TREE_TYPE (x
), x
, t
);
4004 x
= fold_build2 (PLUS_EXPR
, TREE_TYPE (x
), x
, t
);
4007 if ((OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_LINEAR
4008 || TREE_ADDRESSABLE (new_var
))
4009 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
4010 idx
, lane
, ivar
, lvar
))
4012 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
)
4014 tree iv
= create_tmp_var (TREE_TYPE (new_var
));
4015 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, iv
, x
);
4016 gimplify_and_add (x
, ilist
);
4017 gimple_stmt_iterator gsi
4018 = gsi_start_1 (gimple_omp_body_ptr (ctx
->stmt
));
4020 = gimple_build_assign (unshare_expr (lvar
), iv
);
4021 gsi_insert_before_without_update (&gsi
, g
,
4023 tree t
= OMP_CLAUSE_LINEAR_STEP (c
);
4024 enum tree_code code
= PLUS_EXPR
;
4025 if (POINTER_TYPE_P (TREE_TYPE (new_var
)))
4026 code
= POINTER_PLUS_EXPR
;
4027 g
= gimple_build_assign (iv
, code
, iv
, t
);
4028 gsi_insert_before_without_update (&gsi
, g
,
4032 x
= lang_hooks
.decls
.omp_clause_copy_ctor
4033 (c
, unshare_expr (ivar
), x
);
4034 gimplify_and_add (x
, &llist
[0]);
4035 x
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
4038 gimple_seq tseq
= NULL
;
4041 gimplify_stmt (&dtor
, &tseq
);
4042 gimple_seq_add_seq (&llist
[1], tseq
);
4047 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, new_var
, x
);
4048 gimplify_and_add (x
, ilist
);
4051 case OMP_CLAUSE__LOOPTEMP_
:
4052 gcc_assert (is_parallel_ctx (ctx
));
4053 x
= build_outer_var_ref (var
, ctx
);
4054 x
= build2 (MODIFY_EXPR
, TREE_TYPE (new_var
), new_var
, x
);
4055 gimplify_and_add (x
, ilist
);
4058 case OMP_CLAUSE_COPYIN
:
4059 by_ref
= use_pointer_for_field (var
, NULL
);
4060 x
= build_receiver_ref (var
, by_ref
, ctx
);
4061 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, x
);
4062 append_to_statement_list (x
, ©in_seq
);
4063 copyin_by_ref
|= by_ref
;
4066 case OMP_CLAUSE_REDUCTION
:
4067 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
4069 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
4071 x
= build_outer_var_ref (var
, ctx
);
4073 if (is_reference (var
)
4074 && !useless_type_conversion_p (TREE_TYPE (placeholder
),
4076 x
= build_fold_addr_expr_loc (clause_loc
, x
);
4077 SET_DECL_VALUE_EXPR (placeholder
, x
);
4078 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
4079 tree new_vard
= new_var
;
4080 if (is_reference (var
))
4082 gcc_assert (TREE_CODE (new_var
) == MEM_REF
);
4083 new_vard
= TREE_OPERAND (new_var
, 0);
4084 gcc_assert (DECL_P (new_vard
));
4087 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
4088 idx
, lane
, ivar
, lvar
))
4090 if (new_vard
== new_var
)
4092 gcc_assert (DECL_VALUE_EXPR (new_var
) == lvar
);
4093 SET_DECL_VALUE_EXPR (new_var
, ivar
);
4097 SET_DECL_VALUE_EXPR (new_vard
,
4098 build_fold_addr_expr (ivar
));
4099 DECL_HAS_VALUE_EXPR_P (new_vard
) = 1;
4101 x
= lang_hooks
.decls
.omp_clause_default_ctor
4102 (c
, unshare_expr (ivar
),
4103 build_outer_var_ref (var
, ctx
));
4105 gimplify_and_add (x
, &llist
[0]);
4106 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
))
4108 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
);
4109 lower_omp (&tseq
, ctx
);
4110 gimple_seq_add_seq (&llist
[0], tseq
);
4112 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
4113 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
);
4114 lower_omp (&tseq
, ctx
);
4115 gimple_seq_add_seq (&llist
[1], tseq
);
4116 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
4117 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
4118 if (new_vard
== new_var
)
4119 SET_DECL_VALUE_EXPR (new_var
, lvar
);
4121 SET_DECL_VALUE_EXPR (new_vard
,
4122 build_fold_addr_expr (lvar
));
4123 x
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
4128 gimplify_stmt (&dtor
, &tseq
);
4129 gimple_seq_add_seq (&llist
[1], tseq
);
4133 /* If this is a reference to constant size reduction var
4134 with placeholder, we haven't emitted the initializer
4135 for it because it is undesirable if SIMD arrays are used.
4136 But if they aren't used, we need to emit the deferred
4137 initialization now. */
4138 else if (is_reference (var
) && is_simd
)
4139 handle_simd_reference (clause_loc
, new_vard
, ilist
);
4140 x
= lang_hooks
.decls
.omp_clause_default_ctor
4141 (c
, unshare_expr (new_var
),
4142 build_outer_var_ref (var
, ctx
));
4144 gimplify_and_add (x
, ilist
);
4145 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
))
4147 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
);
4148 lower_omp (&tseq
, ctx
);
4149 gimple_seq_add_seq (ilist
, tseq
);
4151 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
4154 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
);
4155 lower_omp (&tseq
, ctx
);
4156 gimple_seq_add_seq (dlist
, tseq
);
4157 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
4159 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
4164 x
= omp_reduction_init (c
, TREE_TYPE (new_var
));
4165 gcc_assert (TREE_CODE (TREE_TYPE (new_var
)) != ARRAY_TYPE
);
4166 enum tree_code code
= OMP_CLAUSE_REDUCTION_CODE (c
);
4168 /* reduction(-:var) sums up the partial results, so it
4169 acts identically to reduction(+:var). */
4170 if (code
== MINUS_EXPR
)
4173 tree new_vard
= new_var
;
4174 if (is_simd
&& is_reference (var
))
4176 gcc_assert (TREE_CODE (new_var
) == MEM_REF
);
4177 new_vard
= TREE_OPERAND (new_var
, 0);
4178 gcc_assert (DECL_P (new_vard
));
4181 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
4182 idx
, lane
, ivar
, lvar
))
4184 tree ref
= build_outer_var_ref (var
, ctx
);
4186 gimplify_assign (unshare_expr (ivar
), x
, &llist
[0]);
4188 x
= build2 (code
, TREE_TYPE (ref
), ref
, ivar
);
4189 ref
= build_outer_var_ref (var
, ctx
);
4190 gimplify_assign (ref
, x
, &llist
[1]);
4192 if (new_vard
!= new_var
)
4194 SET_DECL_VALUE_EXPR (new_vard
,
4195 build_fold_addr_expr (lvar
));
4196 DECL_HAS_VALUE_EXPR_P (new_vard
) = 1;
4201 if (is_reference (var
) && is_simd
)
4202 handle_simd_reference (clause_loc
, new_vard
, ilist
);
4203 gimplify_assign (new_var
, x
, ilist
);
4206 tree ref
= build_outer_var_ref (var
, ctx
);
4208 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
4209 ref
= build_outer_var_ref (var
, ctx
);
4210 gimplify_assign (ref
, x
, dlist
);
4224 tree uid
= create_tmp_var (ptr_type_node
, "simduid");
4225 /* Don't want uninit warnings on simduid, it is always uninitialized,
4226 but we use it not for the value, but for the DECL_UID only. */
4227 TREE_NO_WARNING (uid
) = 1;
4229 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE
, 1, uid
);
4230 gimple_call_set_lhs (g
, lane
);
4231 gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx
->stmt
));
4232 gsi_insert_before_without_update (&gsi
, g
, GSI_SAME_STMT
);
4233 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE__SIMDUID_
);
4234 OMP_CLAUSE__SIMDUID__DECL (c
) = uid
;
4235 OMP_CLAUSE_CHAIN (c
) = gimple_omp_for_clauses (ctx
->stmt
);
4236 gimple_omp_for_set_clauses (ctx
->stmt
, c
);
4237 g
= gimple_build_assign (lane
, INTEGER_CST
,
4238 build_int_cst (unsigned_type_node
, 0));
4239 gimple_seq_add_stmt (ilist
, g
);
4240 for (int i
= 0; i
< 2; i
++)
4243 tree vf
= create_tmp_var (unsigned_type_node
);
4244 g
= gimple_build_call_internal (IFN_GOMP_SIMD_VF
, 1, uid
);
4245 gimple_call_set_lhs (g
, vf
);
4246 gimple_seq
*seq
= i
== 0 ? ilist
: dlist
;
4247 gimple_seq_add_stmt (seq
, g
);
4248 tree t
= build_int_cst (unsigned_type_node
, 0);
4249 g
= gimple_build_assign (idx
, INTEGER_CST
, t
);
4250 gimple_seq_add_stmt (seq
, g
);
4251 tree body
= create_artificial_label (UNKNOWN_LOCATION
);
4252 tree header
= create_artificial_label (UNKNOWN_LOCATION
);
4253 tree end
= create_artificial_label (UNKNOWN_LOCATION
);
4254 gimple_seq_add_stmt (seq
, gimple_build_goto (header
));
4255 gimple_seq_add_stmt (seq
, gimple_build_label (body
));
4256 gimple_seq_add_seq (seq
, llist
[i
]);
4257 t
= build_int_cst (unsigned_type_node
, 1);
4258 g
= gimple_build_assign (idx
, PLUS_EXPR
, idx
, t
);
4259 gimple_seq_add_stmt (seq
, g
);
4260 gimple_seq_add_stmt (seq
, gimple_build_label (header
));
4261 g
= gimple_build_cond (LT_EXPR
, idx
, vf
, body
, end
);
4262 gimple_seq_add_stmt (seq
, g
);
4263 gimple_seq_add_stmt (seq
, gimple_build_label (end
));
4267 /* The copyin sequence is not to be executed by the main thread, since
4268 that would result in self-copies. Perhaps not visible to scalars,
4269 but it certainly is to C++ operator=. */
4272 x
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
),
4274 x
= build2 (NE_EXPR
, boolean_type_node
, x
,
4275 build_int_cst (TREE_TYPE (x
), 0));
4276 x
= build3 (COND_EXPR
, void_type_node
, x
, copyin_seq
, NULL
);
4277 gimplify_and_add (x
, ilist
);
4280 /* If any copyin variable is passed by reference, we must ensure the
4281 master thread doesn't modify it before it is copied over in all
4282 threads. Similarly for variables in both firstprivate and
4283 lastprivate clauses we need to ensure the lastprivate copying
4284 happens after firstprivate copying in all threads. And similarly
4285 for UDRs if initializer expression refers to omp_orig. */
4286 if (copyin_by_ref
|| lastprivate_firstprivate
|| reduction_omp_orig_ref
)
4288 /* Don't add any barrier for #pragma omp simd or
4289 #pragma omp distribute. */
4290 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_FOR
4291 || gimple_omp_for_kind (ctx
->stmt
) == GF_OMP_FOR_KIND_FOR
)
4292 gimple_seq_add_stmt (ilist
, build_omp_barrier (NULL_TREE
));
4295 /* If max_vf is non-zero, then we can use only a vectorization factor
4296 up to the max_vf we chose. So stick it into the safelen clause. */
4299 tree c
= find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
4300 OMP_CLAUSE_SAFELEN
);
4302 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c
)) == INTEGER_CST
4303 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c
),
4306 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE_SAFELEN
);
4307 OMP_CLAUSE_SAFELEN_EXPR (c
) = build_int_cst (integer_type_node
,
4309 OMP_CLAUSE_CHAIN (c
) = gimple_omp_for_clauses (ctx
->stmt
);
4310 gimple_omp_for_set_clauses (ctx
->stmt
, c
);
4316 /* Generate code to implement the LASTPRIVATE clauses. This is used for
4317 both parallel and workshare constructs. PREDICATE may be NULL if it's
4321 lower_lastprivate_clauses (tree clauses
, tree predicate
, gimple_seq
*stmt_list
,
4324 tree x
, c
, label
= NULL
, orig_clauses
= clauses
;
4325 bool par_clauses
= false;
4326 tree simduid
= NULL
, lastlane
= NULL
;
4328 /* Early exit if there are no lastprivate or linear clauses. */
4329 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
4330 if (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_LASTPRIVATE
4331 || (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_LINEAR
4332 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses
)))
4334 if (clauses
== NULL
)
4336 /* If this was a workshare clause, see if it had been combined
4337 with its parallel. In that case, look for the clauses on the
4338 parallel statement itself. */
4339 if (is_parallel_ctx (ctx
))
4343 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
4346 clauses
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
4347 OMP_CLAUSE_LASTPRIVATE
);
4348 if (clauses
== NULL
)
4356 tree label_true
, arm1
, arm2
;
4358 label
= create_artificial_label (UNKNOWN_LOCATION
);
4359 label_true
= create_artificial_label (UNKNOWN_LOCATION
);
4360 arm1
= TREE_OPERAND (predicate
, 0);
4361 arm2
= TREE_OPERAND (predicate
, 1);
4362 gimplify_expr (&arm1
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
4363 gimplify_expr (&arm2
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
4364 stmt
= gimple_build_cond (TREE_CODE (predicate
), arm1
, arm2
,
4366 gimple_seq_add_stmt (stmt_list
, stmt
);
4367 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label_true
));
4370 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
4371 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
4373 simduid
= find_omp_clause (orig_clauses
, OMP_CLAUSE__SIMDUID_
);
4375 simduid
= OMP_CLAUSE__SIMDUID__DECL (simduid
);
4378 for (c
= clauses
; c
;)
4381 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
4383 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
4384 || (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
4385 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c
)))
4387 var
= OMP_CLAUSE_DECL (c
);
4388 new_var
= lookup_decl (var
, ctx
);
4390 if (simduid
&& DECL_HAS_VALUE_EXPR_P (new_var
))
4392 tree val
= DECL_VALUE_EXPR (new_var
);
4393 if (TREE_CODE (val
) == ARRAY_REF
4394 && VAR_P (TREE_OPERAND (val
, 0))
4395 && lookup_attribute ("omp simd array",
4396 DECL_ATTRIBUTES (TREE_OPERAND (val
,
4399 if (lastlane
== NULL
)
4401 lastlane
= create_tmp_var (unsigned_type_node
);
4403 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE
,
4405 TREE_OPERAND (val
, 1));
4406 gimple_call_set_lhs (g
, lastlane
);
4407 gimple_seq_add_stmt (stmt_list
, g
);
4409 new_var
= build4 (ARRAY_REF
, TREE_TYPE (val
),
4410 TREE_OPERAND (val
, 0), lastlane
,
4411 NULL_TREE
, NULL_TREE
);
4415 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
4416 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
4418 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
4419 gimple_seq_add_seq (stmt_list
,
4420 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
));
4421 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
) = NULL
;
4423 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
4424 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
))
4426 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
), ctx
);
4427 gimple_seq_add_seq (stmt_list
,
4428 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
));
4429 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
) = NULL
;
4432 x
= build_outer_var_ref (var
, ctx
);
4433 if (is_reference (var
))
4434 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
4435 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, x
, new_var
);
4436 gimplify_and_add (x
, stmt_list
);
4438 c
= OMP_CLAUSE_CHAIN (c
);
4439 if (c
== NULL
&& !par_clauses
)
4441 /* If this was a workshare clause, see if it had been combined
4442 with its parallel. In that case, continue looking for the
4443 clauses also on the parallel statement itself. */
4444 if (is_parallel_ctx (ctx
))
4448 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
4451 c
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
4452 OMP_CLAUSE_LASTPRIVATE
);
4458 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label
));
4462 oacc_lower_reduction_var_helper (gimple_seq
*stmt_seqp
, omp_context
*ctx
,
4463 tree tid
, tree var
, tree new_var
)
4465 /* The atomic add at the end of the sum creates unnecessary
4466 write contention on accelerators. To work around this,
4467 create an array to store the partial reductions. Later, in
4468 lower_omp_for (for openacc), the values of array will be
4471 tree t
= NULL_TREE
, array
, x
;
4472 tree type
= get_base_type (var
);
4475 /* Now insert the partial reductions into the array. */
4477 /* Find the reduction array. */
4479 tree ptype
= build_pointer_type (type
);
4481 t
= lookup_oacc_reduction (oacc_get_reduction_array_id (var
), ctx
);
4482 t
= build_receiver_ref (t
, false, ctx
->outer
);
4484 array
= create_tmp_var (ptype
);
4485 gimplify_assign (array
, t
, stmt_seqp
);
4487 tree ptr
= create_tmp_var (TREE_TYPE (array
));
4489 /* Find the reduction array. */
4491 /* testing a unary conversion. */
4492 tree offset
= create_tmp_var (sizetype
);
4493 gimplify_assign (offset
, TYPE_SIZE_UNIT (type
),
4495 t
= create_tmp_var (sizetype
);
4496 gimplify_assign (t
, unshare_expr (fold_build1 (NOP_EXPR
, sizetype
, tid
)),
4498 stmt
= gimple_build_assign (offset
, MULT_EXPR
, offset
, t
);
4499 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4501 /* Offset expression. Does the POINTER_PLUS_EXPR take care
4502 of adding sizeof(var) to the array? */
4503 ptr
= create_tmp_var (ptype
);
4504 stmt
= gimple_build_assign (unshare_expr (ptr
), POINTER_PLUS_EXPR
, array
,
4506 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4508 /* Move the local sum to gfc$sum[i]. */
4509 x
= unshare_expr (build_simple_mem_ref (ptr
));
4510 stmt
= gimplify_assign (x
, new_var
, stmt_seqp
);
4513 /* Generate code to implement the REDUCTION clauses. */
4516 lower_reduction_clauses (tree clauses
, gimple_seq
*stmt_seqp
, omp_context
*ctx
)
4518 gimple_seq sub_seq
= NULL
;
4520 tree x
, c
, tid
= NULL_TREE
;
4523 /* SIMD reductions are handled in lower_rec_input_clauses. */
4524 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
4525 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
4528 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4529 update in that case, otherwise use a lock. */
4530 for (c
= clauses
; c
&& count
< 2; c
= OMP_CLAUSE_CHAIN (c
))
4531 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
4533 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
4535 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4545 /* Initialize thread info for OpenACC. */
4546 if (is_gimple_omp_oacc (ctx
->stmt
))
4548 /* Get the current thread id. */
4549 tree call
= builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM
);
4550 tid
= create_tmp_var (TREE_TYPE (TREE_TYPE (call
)));
4551 gimple stmt
= gimple_build_call (call
, 0);
4552 gimple_call_set_lhs (stmt
, tid
);
4553 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4556 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
4558 tree var
, ref
, new_var
;
4559 enum tree_code code
;
4560 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
4562 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
4565 var
= OMP_CLAUSE_DECL (c
);
4566 new_var
= lookup_decl (var
, ctx
);
4567 if (is_reference (var
))
4568 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
4569 ref
= build_outer_var_ref (var
, ctx
);
4570 code
= OMP_CLAUSE_REDUCTION_CODE (c
);
4572 /* reduction(-:var) sums up the partial results, so it acts
4573 identically to reduction(+:var). */
4574 if (code
== MINUS_EXPR
)
4577 if (is_gimple_omp_oacc (ctx
->stmt
))
4579 gcc_checking_assert (!OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
));
4581 oacc_lower_reduction_var_helper (stmt_seqp
, ctx
, tid
, var
, new_var
);
4583 else if (count
== 1)
4585 tree addr
= build_fold_addr_expr_loc (clause_loc
, ref
);
4587 addr
= save_expr (addr
);
4588 ref
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (addr
)), addr
);
4589 x
= fold_build2_loc (clause_loc
, code
, TREE_TYPE (ref
), ref
, new_var
);
4590 x
= build2 (OMP_ATOMIC
, void_type_node
, addr
, x
);
4591 gimplify_and_add (x
, stmt_seqp
);
4594 else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
4596 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
4598 if (is_reference (var
)
4599 && !useless_type_conversion_p (TREE_TYPE (placeholder
),
4601 ref
= build_fold_addr_expr_loc (clause_loc
, ref
);
4602 SET_DECL_VALUE_EXPR (placeholder
, ref
);
4603 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
4604 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
4605 gimple_seq_add_seq (&sub_seq
, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
));
4606 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
4607 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
) = NULL
;
4611 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
4612 ref
= build_outer_var_ref (var
, ctx
);
4613 gimplify_assign (ref
, x
, &sub_seq
);
4617 if (is_gimple_omp_oacc (ctx
->stmt
))
4620 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
),
4622 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4624 gimple_seq_add_seq (stmt_seqp
, sub_seq
);
4626 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
),
4628 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4632 /* Generate code to implement the COPYPRIVATE clauses. */
4635 lower_copyprivate_clauses (tree clauses
, gimple_seq
*slist
, gimple_seq
*rlist
,
4640 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
4642 tree var
, new_var
, ref
, x
;
4644 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
4646 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYPRIVATE
)
4649 var
= OMP_CLAUSE_DECL (c
);
4650 by_ref
= use_pointer_for_field (var
, NULL
);
4652 ref
= build_sender_ref (var
, ctx
);
4653 x
= new_var
= lookup_decl_in_outer_ctx (var
, ctx
);
4656 x
= build_fold_addr_expr_loc (clause_loc
, new_var
);
4657 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ref
), x
);
4659 gimplify_assign (ref
, x
, slist
);
4661 ref
= build_receiver_ref (var
, false, ctx
);
4664 ref
= fold_convert_loc (clause_loc
,
4665 build_pointer_type (TREE_TYPE (new_var
)),
4667 ref
= build_fold_indirect_ref_loc (clause_loc
, ref
);
4669 if (is_reference (var
))
4671 ref
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), ref
);
4672 ref
= build_simple_mem_ref_loc (clause_loc
, ref
);
4673 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
4675 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, ref
);
4676 gimplify_and_add (x
, rlist
);
4681 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4682 and REDUCTION from the sender (aka parent) side. */
4685 lower_send_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*olist
,
4690 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
4692 tree val
, ref
, x
, var
;
4693 bool by_ref
, do_in
= false, do_out
= false;
4694 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
4696 switch (OMP_CLAUSE_CODE (c
))
4698 case OMP_CLAUSE_PRIVATE
:
4699 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
4702 case OMP_CLAUSE_FIRSTPRIVATE
:
4703 case OMP_CLAUSE_COPYIN
:
4704 case OMP_CLAUSE_LASTPRIVATE
:
4705 case OMP_CLAUSE_REDUCTION
:
4706 case OMP_CLAUSE__LOOPTEMP_
:
4712 val
= OMP_CLAUSE_DECL (c
);
4713 var
= lookup_decl_in_outer_ctx (val
, ctx
);
4715 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYIN
4716 && is_global_var (var
))
4718 if (is_variable_sized (val
))
4720 by_ref
= use_pointer_for_field (val
, NULL
);
4722 switch (OMP_CLAUSE_CODE (c
))
4724 case OMP_CLAUSE_PRIVATE
:
4725 case OMP_CLAUSE_FIRSTPRIVATE
:
4726 case OMP_CLAUSE_COPYIN
:
4727 case OMP_CLAUSE__LOOPTEMP_
:
4731 case OMP_CLAUSE_LASTPRIVATE
:
4732 if (by_ref
|| is_reference (val
))
4734 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
4741 if (lang_hooks
.decls
.omp_private_outer_ref (val
))
4746 case OMP_CLAUSE_REDUCTION
:
4748 do_out
= !(by_ref
|| is_reference (val
));
4757 ref
= build_sender_ref (val
, ctx
);
4758 x
= by_ref
? build_fold_addr_expr_loc (clause_loc
, var
) : var
;
4759 gimplify_assign (ref
, x
, ilist
);
4760 if (is_task_ctx (ctx
))
4761 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref
, 1)) = NULL
;
4766 ref
= build_sender_ref (val
, ctx
);
4767 gimplify_assign (var
, ref
, olist
);
4772 /* Generate code to implement SHARED from the sender (aka parent)
4773 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4774 list things that got automatically shared. */
4777 lower_send_shared_vars (gimple_seq
*ilist
, gimple_seq
*olist
, omp_context
*ctx
)
4779 tree var
, ovar
, nvar
, f
, x
, record_type
;
4781 if (ctx
->record_type
== NULL
)
4784 record_type
= ctx
->srecord_type
? ctx
->srecord_type
: ctx
->record_type
;
4785 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
4787 ovar
= DECL_ABSTRACT_ORIGIN (f
);
4788 nvar
= maybe_lookup_decl (ovar
, ctx
);
4789 if (!nvar
|| !DECL_HAS_VALUE_EXPR_P (nvar
))
4792 /* If CTX is a nested parallel directive. Find the immediately
4793 enclosing parallel or workshare construct that contains a
4794 mapping for OVAR. */
4795 var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
4797 if (use_pointer_for_field (ovar
, ctx
))
4799 x
= build_sender_ref (ovar
, ctx
);
4800 var
= build_fold_addr_expr (var
);
4801 gimplify_assign (x
, var
, ilist
);
4805 x
= build_sender_ref (ovar
, ctx
);
4806 gimplify_assign (x
, var
, ilist
);
4808 if (!TREE_READONLY (var
)
4809 /* We don't need to receive a new reference to a result
4810 or parm decl. In fact we may not store to it as we will
4811 invalidate any pending RSO and generate wrong gimple
4813 && !((TREE_CODE (var
) == RESULT_DECL
4814 || TREE_CODE (var
) == PARM_DECL
)
4815 && DECL_BY_REFERENCE (var
)))
4817 x
= build_sender_ref (ovar
, ctx
);
4818 gimplify_assign (var
, x
, olist
);
4825 /* A convenience function to build an empty GIMPLE_COND with just the
4829 gimple_build_cond_empty (tree cond
)
4831 enum tree_code pred_code
;
4834 gimple_cond_get_ops_from_tree (cond
, &pred_code
, &lhs
, &rhs
);
4835 return gimple_build_cond (pred_code
, lhs
, rhs
, NULL_TREE
, NULL_TREE
);
4839 /* Build the function calls to GOMP_parallel_start etc to actually
4840 generate the parallel operation. REGION is the parallel region
4841 being expanded. BB is the block where to insert the code. WS_ARGS
4842 will be set if this is a call to a combined parallel+workshare
4843 construct, it contains the list of additional arguments needed by
4844 the workshare construct. */
4847 expand_parallel_call (struct omp_region
*region
, basic_block bb
,
4848 gomp_parallel
*entry_stmt
,
4849 vec
<tree
, va_gc
> *ws_args
)
4851 tree t
, t1
, t2
, val
, cond
, c
, clauses
, flags
;
4852 gimple_stmt_iterator gsi
;
4854 enum built_in_function start_ix
;
4856 location_t clause_loc
;
4857 vec
<tree
, va_gc
> *args
;
4859 clauses
= gimple_omp_parallel_clauses (entry_stmt
);
4861 /* Determine what flavor of GOMP_parallel we will be
4863 start_ix
= BUILT_IN_GOMP_PARALLEL
;
4864 if (is_combined_parallel (region
))
4866 switch (region
->inner
->type
)
4868 case GIMPLE_OMP_FOR
:
4869 gcc_assert (region
->inner
->sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
4870 start_ix2
= ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4871 + (region
->inner
->sched_kind
4872 == OMP_CLAUSE_SCHEDULE_RUNTIME
4873 ? 3 : region
->inner
->sched_kind
));
4874 start_ix
= (enum built_in_function
)start_ix2
;
4876 case GIMPLE_OMP_SECTIONS
:
4877 start_ix
= BUILT_IN_GOMP_PARALLEL_SECTIONS
;
4884 /* By default, the value of NUM_THREADS is zero (selected at run time)
4885 and there is no conditional. */
4887 val
= build_int_cst (unsigned_type_node
, 0);
4888 flags
= build_int_cst (unsigned_type_node
, 0);
4890 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
4892 cond
= OMP_CLAUSE_IF_EXPR (c
);
4894 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_THREADS
);
4897 val
= OMP_CLAUSE_NUM_THREADS_EXPR (c
);
4898 clause_loc
= OMP_CLAUSE_LOCATION (c
);
4901 clause_loc
= gimple_location (entry_stmt
);
4903 c
= find_omp_clause (clauses
, OMP_CLAUSE_PROC_BIND
);
4905 flags
= build_int_cst (unsigned_type_node
, OMP_CLAUSE_PROC_BIND_KIND (c
));
4907 /* Ensure 'val' is of the correct type. */
4908 val
= fold_convert_loc (clause_loc
, unsigned_type_node
, val
);
4910 /* If we found the clause 'if (cond)', build either
4911 (cond != 0) or (cond ? val : 1u). */
4914 cond
= gimple_boolify (cond
);
4916 if (integer_zerop (val
))
4917 val
= fold_build2_loc (clause_loc
,
4918 EQ_EXPR
, unsigned_type_node
, cond
,
4919 build_int_cst (TREE_TYPE (cond
), 0));
4922 basic_block cond_bb
, then_bb
, else_bb
;
4923 edge e
, e_then
, e_else
;
4924 tree tmp_then
, tmp_else
, tmp_join
, tmp_var
;
4926 tmp_var
= create_tmp_var (TREE_TYPE (val
));
4927 if (gimple_in_ssa_p (cfun
))
4929 tmp_then
= make_ssa_name (tmp_var
);
4930 tmp_else
= make_ssa_name (tmp_var
);
4931 tmp_join
= make_ssa_name (tmp_var
);
4940 e
= split_block_after_labels (bb
);
4945 then_bb
= create_empty_bb (cond_bb
);
4946 else_bb
= create_empty_bb (then_bb
);
4947 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
4948 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
4950 stmt
= gimple_build_cond_empty (cond
);
4951 gsi
= gsi_start_bb (cond_bb
);
4952 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4954 gsi
= gsi_start_bb (then_bb
);
4955 stmt
= gimple_build_assign (tmp_then
, val
);
4956 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4958 gsi
= gsi_start_bb (else_bb
);
4959 stmt
= gimple_build_assign
4960 (tmp_else
, build_int_cst (unsigned_type_node
, 1));
4961 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4963 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
4964 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
4965 add_bb_to_loop (then_bb
, cond_bb
->loop_father
);
4966 add_bb_to_loop (else_bb
, cond_bb
->loop_father
);
4967 e_then
= make_edge (then_bb
, bb
, EDGE_FALLTHRU
);
4968 e_else
= make_edge (else_bb
, bb
, EDGE_FALLTHRU
);
4970 if (gimple_in_ssa_p (cfun
))
4972 gphi
*phi
= create_phi_node (tmp_join
, bb
);
4973 add_phi_arg (phi
, tmp_then
, e_then
, UNKNOWN_LOCATION
);
4974 add_phi_arg (phi
, tmp_else
, e_else
, UNKNOWN_LOCATION
);
4980 gsi
= gsi_start_bb (bb
);
4981 val
= force_gimple_operand_gsi (&gsi
, val
, true, NULL_TREE
,
4982 false, GSI_CONTINUE_LINKING
);
4985 gsi
= gsi_last_bb (bb
);
4986 t
= gimple_omp_parallel_data_arg (entry_stmt
);
4988 t1
= null_pointer_node
;
4990 t1
= build_fold_addr_expr (t
);
4991 t2
= build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt
));
4993 vec_alloc (args
, 4 + vec_safe_length (ws_args
));
4994 args
->quick_push (t2
);
4995 args
->quick_push (t1
);
4996 args
->quick_push (val
);
4998 args
->splice (*ws_args
);
4999 args
->quick_push (flags
);
5001 t
= build_call_expr_loc_vec (UNKNOWN_LOCATION
,
5002 builtin_decl_explicit (start_ix
), args
);
5004 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5005 false, GSI_CONTINUE_LINKING
);
5008 /* Insert a function call whose name is FUNC_NAME with the information from
5009 ENTRY_STMT into the basic_block BB. */
5012 expand_cilk_for_call (basic_block bb
, gomp_parallel
*entry_stmt
,
5013 vec
<tree
, va_gc
> *ws_args
)
5016 gimple_stmt_iterator gsi
;
5017 vec
<tree
, va_gc
> *args
;
5019 gcc_assert (vec_safe_length (ws_args
) == 2);
5020 tree func_name
= (*ws_args
)[0];
5021 tree grain
= (*ws_args
)[1];
5023 tree clauses
= gimple_omp_parallel_clauses (entry_stmt
);
5024 tree count
= find_omp_clause (clauses
, OMP_CLAUSE__CILK_FOR_COUNT_
);
5025 gcc_assert (count
!= NULL_TREE
);
5026 count
= OMP_CLAUSE_OPERAND (count
, 0);
5028 gsi
= gsi_last_bb (bb
);
5029 t
= gimple_omp_parallel_data_arg (entry_stmt
);
5031 t1
= null_pointer_node
;
5033 t1
= build_fold_addr_expr (t
);
5034 t2
= build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt
));
5036 vec_alloc (args
, 4);
5037 args
->quick_push (t2
);
5038 args
->quick_push (t1
);
5039 args
->quick_push (count
);
5040 args
->quick_push (grain
);
5041 t
= build_call_expr_loc_vec (UNKNOWN_LOCATION
, func_name
, args
);
5043 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, false,
5044 GSI_CONTINUE_LINKING
);
5047 /* Build the function call to GOMP_task to actually
5048 generate the task operation. BB is the block where to insert the code. */
5051 expand_task_call (basic_block bb
, gomp_task
*entry_stmt
)
5053 tree t
, t1
, t2
, t3
, flags
, cond
, c
, c2
, clauses
, depend
;
5054 gimple_stmt_iterator gsi
;
5055 location_t loc
= gimple_location (entry_stmt
);
5057 clauses
= gimple_omp_task_clauses (entry_stmt
);
5059 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
5061 cond
= gimple_boolify (OMP_CLAUSE_IF_EXPR (c
));
5063 cond
= boolean_true_node
;
5065 c
= find_omp_clause (clauses
, OMP_CLAUSE_UNTIED
);
5066 c2
= find_omp_clause (clauses
, OMP_CLAUSE_MERGEABLE
);
5067 depend
= find_omp_clause (clauses
, OMP_CLAUSE_DEPEND
);
5068 flags
= build_int_cst (unsigned_type_node
,
5069 (c
? 1 : 0) + (c2
? 4 : 0) + (depend
? 8 : 0));
5071 c
= find_omp_clause (clauses
, OMP_CLAUSE_FINAL
);
5074 c
= gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c
));
5075 c
= fold_build3_loc (loc
, COND_EXPR
, unsigned_type_node
, c
,
5076 build_int_cst (unsigned_type_node
, 2),
5077 build_int_cst (unsigned_type_node
, 0));
5078 flags
= fold_build2_loc (loc
, PLUS_EXPR
, unsigned_type_node
, flags
, c
);
5081 depend
= OMP_CLAUSE_DECL (depend
);
5083 depend
= build_int_cst (ptr_type_node
, 0);
5085 gsi
= gsi_last_bb (bb
);
5086 t
= gimple_omp_task_data_arg (entry_stmt
);
5088 t2
= null_pointer_node
;
5090 t2
= build_fold_addr_expr_loc (loc
, t
);
5091 t1
= build_fold_addr_expr_loc (loc
, gimple_omp_task_child_fn (entry_stmt
));
5092 t
= gimple_omp_task_copy_fn (entry_stmt
);
5094 t3
= null_pointer_node
;
5096 t3
= build_fold_addr_expr_loc (loc
, t
);
5098 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK
),
5100 gimple_omp_task_arg_size (entry_stmt
),
5101 gimple_omp_task_arg_align (entry_stmt
), cond
, flags
,
5104 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5105 false, GSI_CONTINUE_LINKING
);
5109 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
5110 catch handler and return it. This prevents programs from violating the
5111 structured block semantics with throws. */
5114 maybe_catch_exception (gimple_seq body
)
5119 if (!flag_exceptions
)
5122 if (lang_hooks
.eh_protect_cleanup_actions
!= NULL
)
5123 decl
= lang_hooks
.eh_protect_cleanup_actions ();
5125 decl
= builtin_decl_explicit (BUILT_IN_TRAP
);
5127 g
= gimple_build_eh_must_not_throw (decl
);
5128 g
= gimple_build_try (body
, gimple_seq_alloc_with_stmt (g
),
5131 return gimple_seq_alloc_with_stmt (g
);
5134 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
5137 vec2chain (vec
<tree
, va_gc
> *v
)
5139 tree chain
= NULL_TREE
, t
;
5142 FOR_EACH_VEC_SAFE_ELT_REVERSE (v
, ix
, t
)
5144 DECL_CHAIN (t
) = chain
;
5152 /* Remove barriers in REGION->EXIT's block. Note that this is only
5153 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
5154 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
5155 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
5159 remove_exit_barrier (struct omp_region
*region
)
5161 gimple_stmt_iterator gsi
;
5162 basic_block exit_bb
;
5166 int any_addressable_vars
= -1;
5168 exit_bb
= region
->exit
;
5170 /* If the parallel region doesn't return, we don't have REGION->EXIT
5175 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
5176 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
5177 statements that can appear in between are extremely limited -- no
5178 memory operations at all. Here, we allow nothing at all, so the
5179 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
5180 gsi
= gsi_last_bb (exit_bb
);
5181 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
5183 if (!gsi_end_p (gsi
) && gimple_code (gsi_stmt (gsi
)) != GIMPLE_LABEL
)
5186 FOR_EACH_EDGE (e
, ei
, exit_bb
->preds
)
5188 gsi
= gsi_last_bb (e
->src
);
5189 if (gsi_end_p (gsi
))
5191 stmt
= gsi_stmt (gsi
);
5192 if (gimple_code (stmt
) == GIMPLE_OMP_RETURN
5193 && !gimple_omp_return_nowait_p (stmt
))
5195 /* OpenMP 3.0 tasks unfortunately prevent this optimization
5196 in many cases. If there could be tasks queued, the barrier
5197 might be needed to let the tasks run before some local
5198 variable of the parallel that the task uses as shared
5199 runs out of scope. The task can be spawned either
5200 from within current function (this would be easy to check)
5201 or from some function it calls and gets passed an address
5202 of such a variable. */
5203 if (any_addressable_vars
< 0)
5205 gomp_parallel
*parallel_stmt
5206 = as_a
<gomp_parallel
*> (last_stmt (region
->entry
));
5207 tree child_fun
= gimple_omp_parallel_child_fn (parallel_stmt
);
5208 tree local_decls
, block
, decl
;
5211 any_addressable_vars
= 0;
5212 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun
), ix
, decl
)
5213 if (TREE_ADDRESSABLE (decl
))
5215 any_addressable_vars
= 1;
5218 for (block
= gimple_block (stmt
);
5219 !any_addressable_vars
5221 && TREE_CODE (block
) == BLOCK
;
5222 block
= BLOCK_SUPERCONTEXT (block
))
5224 for (local_decls
= BLOCK_VARS (block
);
5226 local_decls
= DECL_CHAIN (local_decls
))
5227 if (TREE_ADDRESSABLE (local_decls
))
5229 any_addressable_vars
= 1;
5232 if (block
== gimple_block (parallel_stmt
))
5236 if (!any_addressable_vars
)
5237 gimple_omp_return_set_nowait (stmt
);
5243 remove_exit_barriers (struct omp_region
*region
)
5245 if (region
->type
== GIMPLE_OMP_PARALLEL
)
5246 remove_exit_barrier (region
);
5250 region
= region
->inner
;
5251 remove_exit_barriers (region
);
5252 while (region
->next
)
5254 region
= region
->next
;
5255 remove_exit_barriers (region
);
5260 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
5261 calls. These can't be declared as const functions, but
5262 within one parallel body they are constant, so they can be
5263 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
5264 which are declared const. Similarly for task body, except
5265 that in untied task omp_get_thread_num () can change at any task
5266 scheduling point. */
5269 optimize_omp_library_calls (gimple entry_stmt
)
5272 gimple_stmt_iterator gsi
;
5273 tree thr_num_tree
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
5274 tree thr_num_id
= DECL_ASSEMBLER_NAME (thr_num_tree
);
5275 tree num_thr_tree
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
5276 tree num_thr_id
= DECL_ASSEMBLER_NAME (num_thr_tree
);
5277 bool untied_task
= (gimple_code (entry_stmt
) == GIMPLE_OMP_TASK
5278 && find_omp_clause (gimple_omp_task_clauses (entry_stmt
),
5279 OMP_CLAUSE_UNTIED
) != NULL
);
5281 FOR_EACH_BB_FN (bb
, cfun
)
5282 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
5284 gimple call
= gsi_stmt (gsi
);
5287 if (is_gimple_call (call
)
5288 && (decl
= gimple_call_fndecl (call
))
5289 && DECL_EXTERNAL (decl
)
5290 && TREE_PUBLIC (decl
)
5291 && DECL_INITIAL (decl
) == NULL
)
5295 if (DECL_NAME (decl
) == thr_num_id
)
5297 /* In #pragma omp task untied omp_get_thread_num () can change
5298 during the execution of the task region. */
5301 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
5303 else if (DECL_NAME (decl
) == num_thr_id
)
5304 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
5308 if (DECL_ASSEMBLER_NAME (decl
) != DECL_ASSEMBLER_NAME (built_in
)
5309 || gimple_call_num_args (call
) != 0)
5312 if (flag_exceptions
&& !TREE_NOTHROW (decl
))
5315 if (TREE_CODE (TREE_TYPE (decl
)) != FUNCTION_TYPE
5316 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl
)),
5317 TREE_TYPE (TREE_TYPE (built_in
))))
5320 gimple_call_set_fndecl (call
, built_in
);
5325 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
5329 expand_omp_regimplify_p (tree
*tp
, int *walk_subtrees
, void *)
5333 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
5334 if (TREE_CODE (t
) == VAR_DECL
&& DECL_HAS_VALUE_EXPR_P (t
))
5337 if (TREE_CODE (t
) == ADDR_EXPR
)
5338 recompute_tree_invariant_for_addr_expr (t
);
5340 *walk_subtrees
= !TYPE_P (t
) && !DECL_P (t
);
5344 /* Prepend TO = FROM assignment before *GSI_P. */
5347 expand_omp_build_assign (gimple_stmt_iterator
*gsi_p
, tree to
, tree from
)
5349 bool simple_p
= DECL_P (to
) && TREE_ADDRESSABLE (to
);
5350 from
= force_gimple_operand_gsi (gsi_p
, from
, simple_p
, NULL_TREE
,
5351 true, GSI_SAME_STMT
);
5352 gimple stmt
= gimple_build_assign (to
, from
);
5353 gsi_insert_before (gsi_p
, stmt
, GSI_SAME_STMT
);
5354 if (walk_tree (&from
, expand_omp_regimplify_p
, NULL
, NULL
)
5355 || walk_tree (&to
, expand_omp_regimplify_p
, NULL
, NULL
))
5357 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt
);
5358 gimple_regimplify_operands (stmt
, &gsi
);
5362 /* Expand the OpenMP parallel or task directive starting at REGION. */
5365 expand_omp_taskreg (struct omp_region
*region
)
5367 basic_block entry_bb
, exit_bb
, new_bb
;
5368 struct function
*child_cfun
;
5369 tree child_fn
, block
, t
;
5370 gimple_stmt_iterator gsi
;
5371 gimple entry_stmt
, stmt
;
5373 vec
<tree
, va_gc
> *ws_args
;
5375 entry_stmt
= last_stmt (region
->entry
);
5376 child_fn
= gimple_omp_taskreg_child_fn (entry_stmt
);
5377 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
5379 entry_bb
= region
->entry
;
5380 if (gimple_code (entry_stmt
) == GIMPLE_OMP_TASK
)
5381 exit_bb
= region
->cont
;
5383 exit_bb
= region
->exit
;
5387 && gimple_code (entry_stmt
) == GIMPLE_OMP_PARALLEL
5388 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt
),
5389 OMP_CLAUSE__CILK_FOR_COUNT_
) != NULL_TREE
);
5392 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
5393 and the inner statement contains the name of the built-in function
5395 ws_args
= region
->inner
->ws_args
;
5396 else if (is_combined_parallel (region
))
5397 ws_args
= region
->ws_args
;
5401 if (child_cfun
->cfg
)
5403 /* Due to inlining, it may happen that we have already outlined
5404 the region, in which case all we need to do is make the
5405 sub-graph unreachable and emit the parallel call. */
5406 edge entry_succ_e
, exit_succ_e
;
5408 entry_succ_e
= single_succ_edge (entry_bb
);
5410 gsi
= gsi_last_bb (entry_bb
);
5411 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_PARALLEL
5412 || gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_TASK
);
5413 gsi_remove (&gsi
, true);
5418 exit_succ_e
= single_succ_edge (exit_bb
);
5419 make_edge (new_bb
, exit_succ_e
->dest
, EDGE_FALLTHRU
);
5421 remove_edge_and_dominated_blocks (entry_succ_e
);
5425 unsigned srcidx
, dstidx
, num
;
5427 /* If the parallel region needs data sent from the parent
5428 function, then the very first statement (except possible
5429 tree profile counter updates) of the parallel body
5430 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
5431 &.OMP_DATA_O is passed as an argument to the child function,
5432 we need to replace it with the argument as seen by the child
5435 In most cases, this will end up being the identity assignment
5436 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
5437 a function call that has been inlined, the original PARM_DECL
5438 .OMP_DATA_I may have been converted into a different local
5439 variable. In which case, we need to keep the assignment. */
5440 if (gimple_omp_taskreg_data_arg (entry_stmt
))
5442 basic_block entry_succ_bb
5443 = single_succ_p (entry_bb
) ? single_succ (entry_bb
)
5444 : FALLTHRU_EDGE (entry_bb
)->dest
;
5446 gimple parcopy_stmt
= NULL
;
5448 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
5452 gcc_assert (!gsi_end_p (gsi
));
5453 stmt
= gsi_stmt (gsi
);
5454 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
5457 if (gimple_num_ops (stmt
) == 2)
5459 tree arg
= gimple_assign_rhs1 (stmt
);
5461 /* We're ignore the subcode because we're
5462 effectively doing a STRIP_NOPS. */
5464 if (TREE_CODE (arg
) == ADDR_EXPR
5465 && TREE_OPERAND (arg
, 0)
5466 == gimple_omp_taskreg_data_arg (entry_stmt
))
5468 parcopy_stmt
= stmt
;
5474 gcc_assert (parcopy_stmt
!= NULL
);
5475 arg
= DECL_ARGUMENTS (child_fn
);
5477 if (!gimple_in_ssa_p (cfun
))
5479 if (gimple_assign_lhs (parcopy_stmt
) == arg
)
5480 gsi_remove (&gsi
, true);
5483 /* ?? Is setting the subcode really necessary ?? */
5484 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (arg
));
5485 gimple_assign_set_rhs1 (parcopy_stmt
, arg
);
5490 /* If we are in ssa form, we must load the value from the default
5491 definition of the argument. That should not be defined now,
5492 since the argument is not used uninitialized. */
5493 gcc_assert (ssa_default_def (cfun
, arg
) == NULL
);
5494 narg
= make_ssa_name (arg
, gimple_build_nop ());
5495 set_ssa_default_def (cfun
, arg
, narg
);
5496 /* ?? Is setting the subcode really necessary ?? */
5497 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (narg
));
5498 gimple_assign_set_rhs1 (parcopy_stmt
, narg
);
5499 update_stmt (parcopy_stmt
);
5503 /* Declare local variables needed in CHILD_CFUN. */
5504 block
= DECL_INITIAL (child_fn
);
5505 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
5506 /* The gimplifier could record temporaries in parallel/task block
5507 rather than in containing function's local_decls chain,
5508 which would mean cgraph missed finalizing them. Do it now. */
5509 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
5510 if (TREE_CODE (t
) == VAR_DECL
5512 && !DECL_EXTERNAL (t
))
5513 varpool_node::finalize_decl (t
);
5514 DECL_SAVED_TREE (child_fn
) = NULL
;
5515 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5516 gimple_set_body (child_fn
, NULL
);
5517 TREE_USED (block
) = 1;
5519 /* Reset DECL_CONTEXT on function arguments. */
5520 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
5521 DECL_CONTEXT (t
) = child_fn
;
5523 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5524 so that it can be moved to the child function. */
5525 gsi
= gsi_last_bb (entry_bb
);
5526 stmt
= gsi_stmt (gsi
);
5527 gcc_assert (stmt
&& (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
5528 || gimple_code (stmt
) == GIMPLE_OMP_TASK
));
5529 e
= split_block (entry_bb
, stmt
);
5530 gsi_remove (&gsi
, true);
5533 if (gimple_code (entry_stmt
) == GIMPLE_OMP_PARALLEL
)
5534 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
5537 e2
= make_edge (e
->src
, BRANCH_EDGE (entry_bb
)->dest
, EDGE_ABNORMAL
);
5538 gcc_assert (e2
->dest
== region
->exit
);
5539 remove_edge (BRANCH_EDGE (entry_bb
));
5540 set_immediate_dominator (CDI_DOMINATORS
, e2
->dest
, e
->src
);
5541 gsi
= gsi_last_bb (region
->exit
);
5542 gcc_assert (!gsi_end_p (gsi
)
5543 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
5544 gsi_remove (&gsi
, true);
5547 /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
5550 gsi
= gsi_last_bb (exit_bb
);
5551 gcc_assert (!gsi_end_p (gsi
)
5552 && (gimple_code (gsi_stmt (gsi
))
5553 == (e2
? GIMPLE_OMP_CONTINUE
: GIMPLE_OMP_RETURN
)));
5554 stmt
= gimple_build_return (NULL
);
5555 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
5556 gsi_remove (&gsi
, true);
5559 /* Move the parallel region into CHILD_CFUN. */
5561 if (gimple_in_ssa_p (cfun
))
5563 init_tree_ssa (child_cfun
);
5564 init_ssa_operands (child_cfun
);
5565 child_cfun
->gimple_df
->in_ssa_p
= true;
5569 block
= gimple_block (entry_stmt
);
5571 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
5573 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
5576 basic_block dest_bb
= e2
->dest
;
5578 make_edge (new_bb
, dest_bb
, EDGE_FALLTHRU
);
5580 set_immediate_dominator (CDI_DOMINATORS
, dest_bb
, new_bb
);
5582 /* When the OMP expansion process cannot guarantee an up-to-date
5583 loop tree arrange for the child function to fixup loops. */
5584 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP
))
5585 child_cfun
->x_current_loops
->state
|= LOOPS_NEED_FIXUP
;
5587 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5588 num
= vec_safe_length (child_cfun
->local_decls
);
5589 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
5591 t
= (*child_cfun
->local_decls
)[srcidx
];
5592 if (DECL_CONTEXT (t
) == cfun
->decl
)
5594 if (srcidx
!= dstidx
)
5595 (*child_cfun
->local_decls
)[dstidx
] = t
;
5599 vec_safe_truncate (child_cfun
->local_decls
, dstidx
);
5601 /* Inform the callgraph about the new function. */
5602 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
= cfun
->curr_properties
;
5603 cgraph_node::add_new_function (child_fn
, true);
5604 cgraph_node::get (child_fn
)->parallelized_function
= 1;
5606 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5607 fixed in a following pass. */
5608 push_cfun (child_cfun
);
5610 optimize_omp_library_calls (entry_stmt
);
5611 cgraph_edge::rebuild_edges ();
5613 /* Some EH regions might become dead, see PR34608. If
5614 pass_cleanup_cfg isn't the first pass to happen with the
5615 new child, these dead EH edges might cause problems.
5616 Clean them up now. */
5617 if (flag_exceptions
)
5620 bool changed
= false;
5622 FOR_EACH_BB_FN (bb
, cfun
)
5623 changed
|= gimple_purge_dead_eh_edges (bb
);
5625 cleanup_tree_cfg ();
5627 if (gimple_in_ssa_p (cfun
))
5628 update_ssa (TODO_update_ssa
);
5632 /* Emit a library call to launch the children threads. */
5634 expand_cilk_for_call (new_bb
,
5635 as_a
<gomp_parallel
*> (entry_stmt
), ws_args
);
5636 else if (gimple_code (entry_stmt
) == GIMPLE_OMP_PARALLEL
)
5637 expand_parallel_call (region
, new_bb
,
5638 as_a
<gomp_parallel
*> (entry_stmt
), ws_args
);
5640 expand_task_call (new_bb
, as_a
<gomp_task
*> (entry_stmt
));
5641 if (gimple_in_ssa_p (cfun
))
5642 update_ssa (TODO_update_ssa_only_virtuals
);
5646 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5647 of the combined collapse > 1 loop constructs, generate code like:
5648 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5653 count3 = (adj + N32 - N31) / STEP3;
5654 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5659 count2 = (adj + N22 - N21) / STEP2;
5660 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5665 count1 = (adj + N12 - N11) / STEP1;
5666 count = count1 * count2 * count3;
5667 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5669 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5670 of the combined loop constructs, just initialize COUNTS array
5671 from the _looptemp_ clauses. */
5673 /* NOTE: It *could* be better to moosh all of the BBs together,
5674 creating one larger BB with all the computation and the unexpected
5675 jump at the end. I.e.
5677 bool zero3, zero2, zero1, zero;
5680 count3 = (N32 - N31) /[cl] STEP3;
5682 count2 = (N22 - N21) /[cl] STEP2;
5684 count1 = (N12 - N11) /[cl] STEP1;
5685 zero = zero3 || zero2 || zero1;
5686 count = count1 * count2 * count3;
5687 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5689 After all, we expect the zero=false, and thus we expect to have to
5690 evaluate all of the comparison expressions, so short-circuiting
5691 oughtn't be a win. Since the condition isn't protecting a
5692 denominator, we're not concerned about divide-by-zero, so we can
5693 fully evaluate count even if a numerator turned out to be wrong.
5695 It seems like putting this all together would create much better
5696 scheduling opportunities, and less pressure on the chip's branch
5700 expand_omp_for_init_counts (struct omp_for_data
*fd
, gimple_stmt_iterator
*gsi
,
5701 basic_block
&entry_bb
, tree
*counts
,
5702 basic_block
&zero_iter_bb
, int &first_zero_iter
,
5703 basic_block
&l2_dom_bb
)
5705 tree t
, type
= TREE_TYPE (fd
->loop
.v
);
5709 /* Collapsed loops need work for expansion into SSA form. */
5710 gcc_assert (!gimple_in_ssa_p (cfun
));
5712 if (gimple_omp_for_combined_into_p (fd
->for_stmt
)
5713 && TREE_CODE (fd
->loop
.n2
) != INTEGER_CST
)
5715 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5716 isn't supposed to be handled, as the inner loop doesn't
5718 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5719 OMP_CLAUSE__LOOPTEMP_
);
5720 gcc_assert (innerc
);
5721 for (i
= 0; i
< fd
->collapse
; i
++)
5723 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5724 OMP_CLAUSE__LOOPTEMP_
);
5725 gcc_assert (innerc
);
5727 counts
[i
] = OMP_CLAUSE_DECL (innerc
);
5729 counts
[0] = NULL_TREE
;
5734 for (i
= 0; i
< fd
->collapse
; i
++)
5736 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
5738 if (SSA_VAR_P (fd
->loop
.n2
)
5739 && ((t
= fold_binary (fd
->loops
[i
].cond_code
, boolean_type_node
,
5740 fold_convert (itype
, fd
->loops
[i
].n1
),
5741 fold_convert (itype
, fd
->loops
[i
].n2
)))
5742 == NULL_TREE
|| !integer_onep (t
)))
5746 n1
= fold_convert (itype
, unshare_expr (fd
->loops
[i
].n1
));
5747 n1
= force_gimple_operand_gsi (gsi
, n1
, true, NULL_TREE
,
5748 true, GSI_SAME_STMT
);
5749 n2
= fold_convert (itype
, unshare_expr (fd
->loops
[i
].n2
));
5750 n2
= force_gimple_operand_gsi (gsi
, n2
, true, NULL_TREE
,
5751 true, GSI_SAME_STMT
);
5752 cond_stmt
= gimple_build_cond (fd
->loops
[i
].cond_code
, n1
, n2
,
5753 NULL_TREE
, NULL_TREE
);
5754 gsi_insert_before (gsi
, cond_stmt
, GSI_SAME_STMT
);
5755 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt
),
5756 expand_omp_regimplify_p
, NULL
, NULL
)
5757 || walk_tree (gimple_cond_rhs_ptr (cond_stmt
),
5758 expand_omp_regimplify_p
, NULL
, NULL
))
5760 *gsi
= gsi_for_stmt (cond_stmt
);
5761 gimple_regimplify_operands (cond_stmt
, gsi
);
5763 e
= split_block (entry_bb
, cond_stmt
);
5764 if (zero_iter_bb
== NULL
)
5766 gassign
*assign_stmt
;
5767 first_zero_iter
= i
;
5768 zero_iter_bb
= create_empty_bb (entry_bb
);
5769 add_bb_to_loop (zero_iter_bb
, entry_bb
->loop_father
);
5770 *gsi
= gsi_after_labels (zero_iter_bb
);
5771 assign_stmt
= gimple_build_assign (fd
->loop
.n2
,
5772 build_zero_cst (type
));
5773 gsi_insert_before (gsi
, assign_stmt
, GSI_SAME_STMT
);
5774 set_immediate_dominator (CDI_DOMINATORS
, zero_iter_bb
,
5777 ne
= make_edge (entry_bb
, zero_iter_bb
, EDGE_FALSE_VALUE
);
5778 ne
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
5779 e
->flags
= EDGE_TRUE_VALUE
;
5780 e
->probability
= REG_BR_PROB_BASE
- ne
->probability
;
5781 if (l2_dom_bb
== NULL
)
5782 l2_dom_bb
= entry_bb
;
5784 *gsi
= gsi_last_bb (entry_bb
);
5787 if (POINTER_TYPE_P (itype
))
5788 itype
= signed_type_for (itype
);
5789 t
= build_int_cst (itype
, (fd
->loops
[i
].cond_code
== LT_EXPR
5791 t
= fold_build2 (PLUS_EXPR
, itype
,
5792 fold_convert (itype
, fd
->loops
[i
].step
), t
);
5793 t
= fold_build2 (PLUS_EXPR
, itype
, t
,
5794 fold_convert (itype
, fd
->loops
[i
].n2
));
5795 t
= fold_build2 (MINUS_EXPR
, itype
, t
,
5796 fold_convert (itype
, fd
->loops
[i
].n1
));
5797 /* ?? We could probably use CEIL_DIV_EXPR instead of
5798 TRUNC_DIV_EXPR and adjusting by hand. Unless we can't
5799 generate the same code in the end because generically we
5800 don't know that the values involved must be negative for
5802 if (TYPE_UNSIGNED (itype
) && fd
->loops
[i
].cond_code
== GT_EXPR
)
5803 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
5804 fold_build1 (NEGATE_EXPR
, itype
, t
),
5805 fold_build1 (NEGATE_EXPR
, itype
,
5806 fold_convert (itype
,
5807 fd
->loops
[i
].step
)));
5809 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
,
5810 fold_convert (itype
, fd
->loops
[i
].step
));
5811 t
= fold_convert (type
, t
);
5812 if (TREE_CODE (t
) == INTEGER_CST
)
5816 counts
[i
] = create_tmp_reg (type
, ".count");
5817 expand_omp_build_assign (gsi
, counts
[i
], t
);
5819 if (SSA_VAR_P (fd
->loop
.n2
))
5824 t
= fold_build2 (MULT_EXPR
, type
, fd
->loop
.n2
, counts
[i
]);
5825 expand_omp_build_assign (gsi
, fd
->loop
.n2
, t
);
5831 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5833 V3 = N31 + (T % count3) * STEP3;
5835 V2 = N21 + (T % count2) * STEP2;
5837 V1 = N11 + T * STEP1;
5838 if this loop doesn't have an inner loop construct combined with it.
5839 If it does have an inner loop construct combined with it and the
5840 iteration count isn't known constant, store values from counts array
5841 into its _looptemp_ temporaries instead. */
5844 expand_omp_for_init_vars (struct omp_for_data
*fd
, gimple_stmt_iterator
*gsi
,
5845 tree
*counts
, gimple inner_stmt
, tree startvar
)
5848 if (gimple_omp_for_combined_p (fd
->for_stmt
))
5850 /* If fd->loop.n2 is constant, then no propagation of the counts
5851 is needed, they are constant. */
5852 if (TREE_CODE (fd
->loop
.n2
) == INTEGER_CST
)
5855 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
5856 ? gimple_omp_parallel_clauses (inner_stmt
)
5857 : gimple_omp_for_clauses (inner_stmt
);
5858 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5859 isn't supposed to be handled, as the inner loop doesn't
5861 tree innerc
= find_omp_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
5862 gcc_assert (innerc
);
5863 for (i
= 0; i
< fd
->collapse
; i
++)
5865 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5866 OMP_CLAUSE__LOOPTEMP_
);
5867 gcc_assert (innerc
);
5870 tree tem
= OMP_CLAUSE_DECL (innerc
);
5871 tree t
= fold_convert (TREE_TYPE (tem
), counts
[i
]);
5872 t
= force_gimple_operand_gsi (gsi
, t
, false, NULL_TREE
,
5873 false, GSI_CONTINUE_LINKING
);
5874 gassign
*stmt
= gimple_build_assign (tem
, t
);
5875 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5881 tree type
= TREE_TYPE (fd
->loop
.v
);
5882 tree tem
= create_tmp_reg (type
, ".tem");
5883 gassign
*stmt
= gimple_build_assign (tem
, startvar
);
5884 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5886 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
5888 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
), itype
, t
;
5890 if (POINTER_TYPE_P (vtype
))
5891 itype
= signed_type_for (vtype
);
5893 t
= fold_build2 (TRUNC_MOD_EXPR
, type
, tem
, counts
[i
]);
5896 t
= fold_convert (itype
, t
);
5897 t
= fold_build2 (MULT_EXPR
, itype
, t
,
5898 fold_convert (itype
, fd
->loops
[i
].step
));
5899 if (POINTER_TYPE_P (vtype
))
5900 t
= fold_build_pointer_plus (fd
->loops
[i
].n1
, t
);
5902 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loops
[i
].n1
, t
);
5903 t
= force_gimple_operand_gsi (gsi
, t
,
5904 DECL_P (fd
->loops
[i
].v
)
5905 && TREE_ADDRESSABLE (fd
->loops
[i
].v
),
5907 GSI_CONTINUE_LINKING
);
5908 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
5909 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5912 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, tem
, counts
[i
]);
5913 t
= force_gimple_operand_gsi (gsi
, t
, false, NULL_TREE
,
5914 false, GSI_CONTINUE_LINKING
);
5915 stmt
= gimple_build_assign (tem
, t
);
5916 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5922 /* Helper function for expand_omp_for_*. Generate code like:
5925 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5929 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5936 extract_omp_for_update_vars (struct omp_for_data
*fd
, basic_block cont_bb
,
5937 basic_block body_bb
)
5939 basic_block last_bb
, bb
, collapse_bb
= NULL
;
5941 gimple_stmt_iterator gsi
;
5947 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
5949 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
);
5951 bb
= create_empty_bb (last_bb
);
5952 add_bb_to_loop (bb
, last_bb
->loop_father
);
5953 gsi
= gsi_start_bb (bb
);
5955 if (i
< fd
->collapse
- 1)
5957 e
= make_edge (last_bb
, bb
, EDGE_FALSE_VALUE
);
5958 e
->probability
= REG_BR_PROB_BASE
/ 8;
5960 t
= fd
->loops
[i
+ 1].n1
;
5961 t
= force_gimple_operand_gsi (&gsi
, t
,
5962 DECL_P (fd
->loops
[i
+ 1].v
)
5963 && TREE_ADDRESSABLE (fd
->loops
[i
5966 GSI_CONTINUE_LINKING
);
5967 stmt
= gimple_build_assign (fd
->loops
[i
+ 1].v
, t
);
5968 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5973 set_immediate_dominator (CDI_DOMINATORS
, bb
, last_bb
);
5975 if (POINTER_TYPE_P (vtype
))
5976 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, fd
->loops
[i
].step
);
5978 t
= fold_build2 (PLUS_EXPR
, vtype
, fd
->loops
[i
].v
, fd
->loops
[i
].step
);
5979 t
= force_gimple_operand_gsi (&gsi
, t
,
5980 DECL_P (fd
->loops
[i
].v
)
5981 && TREE_ADDRESSABLE (fd
->loops
[i
].v
),
5982 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
5983 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
5984 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5988 t
= fd
->loops
[i
].n2
;
5989 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5990 false, GSI_CONTINUE_LINKING
);
5991 tree v
= fd
->loops
[i
].v
;
5992 if (DECL_P (v
) && TREE_ADDRESSABLE (v
))
5993 v
= force_gimple_operand_gsi (&gsi
, v
, true, NULL_TREE
,
5994 false, GSI_CONTINUE_LINKING
);
5995 t
= fold_build2 (fd
->loops
[i
].cond_code
, boolean_type_node
, v
, t
);
5996 stmt
= gimple_build_cond_empty (t
);
5997 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5998 e
= make_edge (bb
, body_bb
, EDGE_TRUE_VALUE
);
5999 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
6002 make_edge (bb
, body_bb
, EDGE_FALLTHRU
);
6010 /* A subroutine of expand_omp_for. Generate code for a parallel
6011 loop with any schedule. Given parameters:
6013 for (V = N1; V cond N2; V += STEP) BODY;
6015 where COND is "<" or ">", we generate pseudocode
6017 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
6018 if (more) goto L0; else goto L3;
6025 if (V cond iend) goto L1; else goto L2;
6027 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
6030 If this is a combined omp parallel loop, instead of the call to
6031 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
6032 If this is gimple_omp_for_combined_p loop, then instead of assigning
6033 V and iend in L0 we assign the first two _looptemp_ clause decls of the
6034 inner GIMPLE_OMP_FOR and V += STEP; and
6035 if (V cond iend) goto L1; else goto L2; are removed.
6037 For collapsed loops, given parameters:
6039 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6040 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6041 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6044 we generate pseudocode
6046 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
6051 count3 = (adj + N32 - N31) / STEP3;
6052 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
6057 count2 = (adj + N22 - N21) / STEP2;
6058 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
6063 count1 = (adj + N12 - N11) / STEP1;
6064 count = count1 * count2 * count3;
6069 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
6070 if (more) goto L0; else goto L3;
6074 V3 = N31 + (T % count3) * STEP3;
6076 V2 = N21 + (T % count2) * STEP2;
6078 V1 = N11 + T * STEP1;
6083 if (V < iend) goto L10; else goto L2;
6086 if (V3 cond3 N32) goto L1; else goto L11;
6090 if (V2 cond2 N22) goto L1; else goto L12;
6096 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
6102 expand_omp_for_generic (struct omp_region
*region
,
6103 struct omp_for_data
*fd
,
6104 enum built_in_function start_fn
,
6105 enum built_in_function next_fn
,
6108 tree type
, istart0
, iend0
, iend
;
6109 tree t
, vmain
, vback
, bias
= NULL_TREE
;
6110 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, collapse_bb
;
6111 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
6112 gimple_stmt_iterator gsi
;
6113 gassign
*assign_stmt
;
6114 bool in_combined_parallel
= is_combined_parallel (region
);
6115 bool broken_loop
= region
->cont
== NULL
;
6117 tree
*counts
= NULL
;
6120 gcc_assert (!broken_loop
|| !in_combined_parallel
);
6121 gcc_assert (fd
->iter_type
== long_integer_type_node
6122 || !in_combined_parallel
);
6124 type
= TREE_TYPE (fd
->loop
.v
);
6125 istart0
= create_tmp_var (fd
->iter_type
, ".istart0");
6126 iend0
= create_tmp_var (fd
->iter_type
, ".iend0");
6127 TREE_ADDRESSABLE (istart0
) = 1;
6128 TREE_ADDRESSABLE (iend0
) = 1;
6130 /* See if we need to bias by LLONG_MIN. */
6131 if (fd
->iter_type
== long_long_unsigned_type_node
6132 && TREE_CODE (type
) == INTEGER_TYPE
6133 && !TYPE_UNSIGNED (type
))
6137 if (fd
->loop
.cond_code
== LT_EXPR
)
6140 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
6144 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
6147 if (TREE_CODE (n1
) != INTEGER_CST
6148 || TREE_CODE (n2
) != INTEGER_CST
6149 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
6150 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
6153 entry_bb
= region
->entry
;
6154 cont_bb
= region
->cont
;
6156 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
6157 gcc_assert (broken_loop
6158 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
6159 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
6160 l1_bb
= single_succ (l0_bb
);
6163 l2_bb
= create_empty_bb (cont_bb
);
6164 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
);
6165 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
6169 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
6170 exit_bb
= region
->exit
;
6172 gsi
= gsi_last_bb (entry_bb
);
6174 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6175 if (fd
->collapse
> 1)
6177 int first_zero_iter
= -1;
6178 basic_block zero_iter_bb
= NULL
, l2_dom_bb
= NULL
;
6180 counts
= XALLOCAVEC (tree
, fd
->collapse
);
6181 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
6182 zero_iter_bb
, first_zero_iter
,
6187 /* Some counts[i] vars might be uninitialized if
6188 some loop has zero iterations. But the body shouldn't
6189 be executed in that case, so just avoid uninit warnings. */
6190 for (i
= first_zero_iter
; i
< fd
->collapse
; i
++)
6191 if (SSA_VAR_P (counts
[i
]))
6192 TREE_NO_WARNING (counts
[i
]) = 1;
6194 e
= split_block (entry_bb
, gsi_stmt (gsi
));
6196 make_edge (zero_iter_bb
, entry_bb
, EDGE_FALLTHRU
);
6197 gsi
= gsi_last_bb (entry_bb
);
6198 set_immediate_dominator (CDI_DOMINATORS
, entry_bb
,
6199 get_immediate_dominator (CDI_DOMINATORS
,
6203 if (in_combined_parallel
)
6205 /* In a combined parallel loop, emit a call to
6206 GOMP_loop_foo_next. */
6207 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
6208 build_fold_addr_expr (istart0
),
6209 build_fold_addr_expr (iend0
));
6213 tree t0
, t1
, t2
, t3
, t4
;
6214 /* If this is not a combined parallel loop, emit a call to
6215 GOMP_loop_foo_start in ENTRY_BB. */
6216 t4
= build_fold_addr_expr (iend0
);
6217 t3
= build_fold_addr_expr (istart0
);
6218 t2
= fold_convert (fd
->iter_type
, fd
->loop
.step
);
6221 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
6223 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
6224 OMP_CLAUSE__LOOPTEMP_
);
6225 gcc_assert (innerc
);
6226 t0
= OMP_CLAUSE_DECL (innerc
);
6227 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6228 OMP_CLAUSE__LOOPTEMP_
);
6229 gcc_assert (innerc
);
6230 t1
= OMP_CLAUSE_DECL (innerc
);
6232 if (POINTER_TYPE_P (TREE_TYPE (t0
))
6233 && TYPE_PRECISION (TREE_TYPE (t0
))
6234 != TYPE_PRECISION (fd
->iter_type
))
6236 /* Avoid casting pointers to integer of a different size. */
6237 tree itype
= signed_type_for (type
);
6238 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, t1
));
6239 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, t0
));
6243 t1
= fold_convert (fd
->iter_type
, t1
);
6244 t0
= fold_convert (fd
->iter_type
, t0
);
6248 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
6249 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
6251 if (fd
->iter_type
== long_integer_type_node
)
6255 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
6256 t
= build_call_expr (builtin_decl_explicit (start_fn
),
6257 6, t0
, t1
, t2
, t
, t3
, t4
);
6260 t
= build_call_expr (builtin_decl_explicit (start_fn
),
6261 5, t0
, t1
, t2
, t3
, t4
);
6269 /* The GOMP_loop_ull_*start functions have additional boolean
6270 argument, true for < loops and false for > loops.
6271 In Fortran, the C bool type can be different from
6272 boolean_type_node. */
6273 bfn_decl
= builtin_decl_explicit (start_fn
);
6274 c_bool_type
= TREE_TYPE (TREE_TYPE (bfn_decl
));
6275 t5
= build_int_cst (c_bool_type
,
6276 fd
->loop
.cond_code
== LT_EXPR
? 1 : 0);
6279 tree bfn_decl
= builtin_decl_explicit (start_fn
);
6280 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
6281 t
= build_call_expr (bfn_decl
, 7, t5
, t0
, t1
, t2
, t
, t3
, t4
);
6284 t
= build_call_expr (builtin_decl_explicit (start_fn
),
6285 6, t5
, t0
, t1
, t2
, t3
, t4
);
6288 if (TREE_TYPE (t
) != boolean_type_node
)
6289 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
6290 t
, build_int_cst (TREE_TYPE (t
), 0));
6291 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6292 true, GSI_SAME_STMT
);
6293 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
6295 /* Remove the GIMPLE_OMP_FOR statement. */
6296 gsi_remove (&gsi
, true);
6298 /* Iteration setup for sequential loop goes in L0_BB. */
6299 tree startvar
= fd
->loop
.v
;
6300 tree endvar
= NULL_TREE
;
6302 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6304 gcc_assert (gimple_code (inner_stmt
) == GIMPLE_OMP_FOR
6305 && gimple_omp_for_kind (inner_stmt
)
6306 == GF_OMP_FOR_KIND_SIMD
);
6307 tree innerc
= find_omp_clause (gimple_omp_for_clauses (inner_stmt
),
6308 OMP_CLAUSE__LOOPTEMP_
);
6309 gcc_assert (innerc
);
6310 startvar
= OMP_CLAUSE_DECL (innerc
);
6311 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6312 OMP_CLAUSE__LOOPTEMP_
);
6313 gcc_assert (innerc
);
6314 endvar
= OMP_CLAUSE_DECL (innerc
);
6317 gsi
= gsi_start_bb (l0_bb
);
6320 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
6321 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
6322 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
6323 t
= fold_convert (TREE_TYPE (startvar
), t
);
6324 t
= force_gimple_operand_gsi (&gsi
, t
,
6326 && TREE_ADDRESSABLE (startvar
),
6327 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
6328 assign_stmt
= gimple_build_assign (startvar
, t
);
6329 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
6333 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
6334 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
6335 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
6336 t
= fold_convert (TREE_TYPE (startvar
), t
);
6337 iend
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6338 false, GSI_CONTINUE_LINKING
);
6341 assign_stmt
= gimple_build_assign (endvar
, iend
);
6342 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
6343 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (iend
)))
6344 assign_stmt
= gimple_build_assign (fd
->loop
.v
, iend
);
6346 assign_stmt
= gimple_build_assign (fd
->loop
.v
, NOP_EXPR
, iend
);
6347 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
6349 if (fd
->collapse
> 1)
6350 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
6354 /* Code to control the increment and predicate for the sequential
6355 loop goes in the CONT_BB. */
6356 gsi
= gsi_last_bb (cont_bb
);
6357 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
6358 gcc_assert (gimple_code (cont_stmt
) == GIMPLE_OMP_CONTINUE
);
6359 vmain
= gimple_omp_continue_control_use (cont_stmt
);
6360 vback
= gimple_omp_continue_control_def (cont_stmt
);
6362 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
6364 if (POINTER_TYPE_P (type
))
6365 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
6367 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
6368 t
= force_gimple_operand_gsi (&gsi
, t
,
6370 && TREE_ADDRESSABLE (vback
),
6371 NULL_TREE
, true, GSI_SAME_STMT
);
6372 assign_stmt
= gimple_build_assign (vback
, t
);
6373 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
6375 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
6376 DECL_P (vback
) && TREE_ADDRESSABLE (vback
) ? t
: vback
,
6378 gcond
*cond_stmt
= gimple_build_cond_empty (t
);
6379 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
6382 /* Remove GIMPLE_OMP_CONTINUE. */
6383 gsi_remove (&gsi
, true);
6385 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
6386 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, l1_bb
);
6388 /* Emit code to get the next parallel iteration in L2_BB. */
6389 gsi
= gsi_start_bb (l2_bb
);
6391 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
6392 build_fold_addr_expr (istart0
),
6393 build_fold_addr_expr (iend0
));
6394 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6395 false, GSI_CONTINUE_LINKING
);
6396 if (TREE_TYPE (t
) != boolean_type_node
)
6397 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
6398 t
, build_int_cst (TREE_TYPE (t
), 0));
6399 gcond
*cond_stmt
= gimple_build_cond_empty (t
);
6400 gsi_insert_after (&gsi
, cond_stmt
, GSI_CONTINUE_LINKING
);
6403 /* Add the loop cleanup function. */
6404 gsi
= gsi_last_bb (exit_bb
);
6405 if (gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
6406 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT
);
6407 else if (gimple_omp_return_lhs (gsi_stmt (gsi
)))
6408 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL
);
6410 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END
);
6411 gcall
*call_stmt
= gimple_build_call (t
, 0);
6412 if (gimple_omp_return_lhs (gsi_stmt (gsi
)))
6413 gimple_call_set_lhs (call_stmt
, gimple_omp_return_lhs (gsi_stmt (gsi
)));
6414 gsi_insert_after (&gsi
, call_stmt
, GSI_SAME_STMT
);
6415 gsi_remove (&gsi
, true);
6417 /* Connect the new blocks. */
6418 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
6419 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
6425 e
= find_edge (cont_bb
, l3_bb
);
6426 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
6428 phis
= phi_nodes (l3_bb
);
6429 for (gsi
= gsi_start (phis
); !gsi_end_p (gsi
); gsi_next (&gsi
))
6431 gimple phi
= gsi_stmt (gsi
);
6432 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
6433 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
6437 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
6438 add_bb_to_loop (l2_bb
, cont_bb
->loop_father
);
6439 e
= find_edge (cont_bb
, l1_bb
);
6440 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6445 else if (fd
->collapse
> 1)
6448 e
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
6451 e
->flags
= EDGE_TRUE_VALUE
;
6454 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
6455 find_edge (cont_bb
, l2_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
6459 e
= find_edge (cont_bb
, l2_bb
);
6460 e
->flags
= EDGE_FALLTHRU
;
6462 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
6464 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
6465 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
6466 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
6467 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
6468 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
6469 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
6470 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
6471 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
6473 struct loop
*outer_loop
= alloc_loop ();
6474 outer_loop
->header
= l0_bb
;
6475 outer_loop
->latch
= l2_bb
;
6476 add_loop (outer_loop
, l0_bb
->loop_father
);
6478 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
6480 struct loop
*loop
= alloc_loop ();
6481 loop
->header
= l1_bb
;
6482 /* The loop may have multiple latches. */
6483 add_loop (loop
, outer_loop
);
6489 /* A subroutine of expand_omp_for. Generate code for a parallel
6490 loop with static schedule and no specified chunk size. Given
6493 for (V = N1; V cond N2; V += STEP) BODY;
6495 where COND is "<" or ">", we generate pseudocode
6497 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6502 if ((__typeof (V)) -1 > 0 && cond is >)
6503 n = -(adj + N2 - N1) / -STEP;
6505 n = (adj + N2 - N1) / STEP;
6508 if (threadid < tt) goto L3; else goto L4;
6513 s0 = q * threadid + tt;
6516 if (s0 >= e0) goto L2; else goto L0;
6522 if (V cond e) goto L1;
6527 expand_omp_for_static_nochunk (struct omp_region
*region
,
6528 struct omp_for_data
*fd
,
6531 tree n
, q
, s0
, e0
, e
, t
, tt
, nthreads
, threadid
;
6532 tree type
, itype
, vmain
, vback
;
6533 basic_block entry_bb
, second_bb
, third_bb
, exit_bb
, seq_start_bb
;
6534 basic_block body_bb
, cont_bb
, collapse_bb
= NULL
;
6536 gimple_stmt_iterator gsi
;
6538 bool broken_loop
= region
->cont
== NULL
;
6539 tree
*counts
= NULL
;
6542 gcc_checking_assert ((gimple_omp_for_kind (fd
->for_stmt
)
6543 != GF_OMP_FOR_KIND_OACC_LOOP
)
6546 itype
= type
= TREE_TYPE (fd
->loop
.v
);
6547 if (POINTER_TYPE_P (type
))
6548 itype
= signed_type_for (type
);
6550 entry_bb
= region
->entry
;
6551 cont_bb
= region
->cont
;
6552 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
6553 fin_bb
= BRANCH_EDGE (entry_bb
)->dest
;
6554 gcc_assert (broken_loop
6555 || (fin_bb
== FALLTHRU_EDGE (cont_bb
)->dest
));
6556 seq_start_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
6557 body_bb
= single_succ (seq_start_bb
);
6560 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
6561 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
6563 exit_bb
= region
->exit
;
6565 /* Iteration space partitioning goes in ENTRY_BB. */
6566 gsi
= gsi_last_bb (entry_bb
);
6567 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6569 if (fd
->collapse
> 1)
6571 int first_zero_iter
= -1;
6572 basic_block l2_dom_bb
= NULL
;
6574 counts
= XALLOCAVEC (tree
, fd
->collapse
);
6575 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
6576 fin_bb
, first_zero_iter
,
6580 else if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
6581 t
= integer_one_node
;
6583 t
= fold_binary (fd
->loop
.cond_code
, boolean_type_node
,
6584 fold_convert (type
, fd
->loop
.n1
),
6585 fold_convert (type
, fd
->loop
.n2
));
6586 if (fd
->collapse
== 1
6587 && TYPE_UNSIGNED (type
)
6588 && (t
== NULL_TREE
|| !integer_onep (t
)))
6590 n1
= fold_convert (type
, unshare_expr (fd
->loop
.n1
));
6591 n1
= force_gimple_operand_gsi (&gsi
, n1
, true, NULL_TREE
,
6592 true, GSI_SAME_STMT
);
6593 n2
= fold_convert (type
, unshare_expr (fd
->loop
.n2
));
6594 n2
= force_gimple_operand_gsi (&gsi
, n2
, true, NULL_TREE
,
6595 true, GSI_SAME_STMT
);
6596 gcond
*cond_stmt
= gimple_build_cond (fd
->loop
.cond_code
, n1
, n2
,
6597 NULL_TREE
, NULL_TREE
);
6598 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
6599 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt
),
6600 expand_omp_regimplify_p
, NULL
, NULL
)
6601 || walk_tree (gimple_cond_rhs_ptr (cond_stmt
),
6602 expand_omp_regimplify_p
, NULL
, NULL
))
6604 gsi
= gsi_for_stmt (cond_stmt
);
6605 gimple_regimplify_operands (cond_stmt
, &gsi
);
6607 ep
= split_block (entry_bb
, cond_stmt
);
6608 ep
->flags
= EDGE_TRUE_VALUE
;
6609 entry_bb
= ep
->dest
;
6610 ep
->probability
= REG_BR_PROB_BASE
- (REG_BR_PROB_BASE
/ 2000 - 1);
6611 ep
= make_edge (ep
->src
, fin_bb
, EDGE_FALSE_VALUE
);
6612 ep
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
6613 if (gimple_in_ssa_p (cfun
))
6615 int dest_idx
= find_edge (entry_bb
, fin_bb
)->dest_idx
;
6616 for (gphi_iterator gpi
= gsi_start_phis (fin_bb
);
6617 !gsi_end_p (gpi
); gsi_next (&gpi
))
6619 gphi
*phi
= gpi
.phi ();
6620 add_phi_arg (phi
, gimple_phi_arg_def (phi
, dest_idx
),
6621 ep
, UNKNOWN_LOCATION
);
6624 gsi
= gsi_last_bb (entry_bb
);
6627 switch (gimple_omp_for_kind (fd
->for_stmt
))
6629 case GF_OMP_FOR_KIND_FOR
:
6630 nthreads
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
6631 threadid
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
6633 case GF_OMP_FOR_KIND_DISTRIBUTE
:
6634 nthreads
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS
);
6635 threadid
= builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM
);
6637 case GF_OMP_FOR_KIND_OACC_LOOP
:
6638 nthreads
= builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS
);
6639 threadid
= builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM
);
6644 nthreads
= build_call_expr (nthreads
, 0);
6645 nthreads
= fold_convert (itype
, nthreads
);
6646 nthreads
= force_gimple_operand_gsi (&gsi
, nthreads
, true, NULL_TREE
,
6647 true, GSI_SAME_STMT
);
6648 threadid
= build_call_expr (threadid
, 0);
6649 threadid
= fold_convert (itype
, threadid
);
6650 threadid
= force_gimple_operand_gsi (&gsi
, threadid
, true, NULL_TREE
,
6651 true, GSI_SAME_STMT
);
6655 step
= fd
->loop
.step
;
6656 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
6658 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
6659 OMP_CLAUSE__LOOPTEMP_
);
6660 gcc_assert (innerc
);
6661 n1
= OMP_CLAUSE_DECL (innerc
);
6662 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6663 OMP_CLAUSE__LOOPTEMP_
);
6664 gcc_assert (innerc
);
6665 n2
= OMP_CLAUSE_DECL (innerc
);
6667 n1
= force_gimple_operand_gsi (&gsi
, fold_convert (type
, n1
),
6668 true, NULL_TREE
, true, GSI_SAME_STMT
);
6669 n2
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, n2
),
6670 true, NULL_TREE
, true, GSI_SAME_STMT
);
6671 step
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, step
),
6672 true, NULL_TREE
, true, GSI_SAME_STMT
);
6674 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
6675 t
= fold_build2 (PLUS_EXPR
, itype
, step
, t
);
6676 t
= fold_build2 (PLUS_EXPR
, itype
, t
, n2
);
6677 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
6678 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
6679 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
6680 fold_build1 (NEGATE_EXPR
, itype
, t
),
6681 fold_build1 (NEGATE_EXPR
, itype
, step
));
6683 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
6684 t
= fold_convert (itype
, t
);
6685 n
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6687 q
= create_tmp_reg (itype
, "q");
6688 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, n
, nthreads
);
6689 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
6690 gsi_insert_before (&gsi
, gimple_build_assign (q
, t
), GSI_SAME_STMT
);
6692 tt
= create_tmp_reg (itype
, "tt");
6693 t
= fold_build2 (TRUNC_MOD_EXPR
, itype
, n
, nthreads
);
6694 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
6695 gsi_insert_before (&gsi
, gimple_build_assign (tt
, t
), GSI_SAME_STMT
);
6697 t
= build2 (LT_EXPR
, boolean_type_node
, threadid
, tt
);
6698 gcond
*cond_stmt
= gimple_build_cond_empty (t
);
6699 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
6701 second_bb
= split_block (entry_bb
, cond_stmt
)->dest
;
6702 gsi
= gsi_last_bb (second_bb
);
6703 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6705 gsi_insert_before (&gsi
, gimple_build_assign (tt
, build_int_cst (itype
, 0)),
6707 gassign
*assign_stmt
6708 = gimple_build_assign (q
, PLUS_EXPR
, q
, build_int_cst (itype
, 1));
6709 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
6711 third_bb
= split_block (second_bb
, assign_stmt
)->dest
;
6712 gsi
= gsi_last_bb (third_bb
);
6713 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6715 t
= build2 (MULT_EXPR
, itype
, q
, threadid
);
6716 t
= build2 (PLUS_EXPR
, itype
, t
, tt
);
6717 s0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6719 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, q
);
6720 e0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6722 t
= build2 (GE_EXPR
, boolean_type_node
, s0
, e0
);
6723 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
6725 /* Remove the GIMPLE_OMP_FOR statement. */
6726 gsi_remove (&gsi
, true);
6728 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6729 gsi
= gsi_start_bb (seq_start_bb
);
6731 tree startvar
= fd
->loop
.v
;
6732 tree endvar
= NULL_TREE
;
6734 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6736 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
6737 ? gimple_omp_parallel_clauses (inner_stmt
)
6738 : gimple_omp_for_clauses (inner_stmt
);
6739 tree innerc
= find_omp_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
6740 gcc_assert (innerc
);
6741 startvar
= OMP_CLAUSE_DECL (innerc
);
6742 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6743 OMP_CLAUSE__LOOPTEMP_
);
6744 gcc_assert (innerc
);
6745 endvar
= OMP_CLAUSE_DECL (innerc
);
6747 t
= fold_convert (itype
, s0
);
6748 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6749 if (POINTER_TYPE_P (type
))
6750 t
= fold_build_pointer_plus (n1
, t
);
6752 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6753 t
= fold_convert (TREE_TYPE (startvar
), t
);
6754 t
= force_gimple_operand_gsi (&gsi
, t
,
6756 && TREE_ADDRESSABLE (startvar
),
6757 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
6758 assign_stmt
= gimple_build_assign (startvar
, t
);
6759 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
6761 t
= fold_convert (itype
, e0
);
6762 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6763 if (POINTER_TYPE_P (type
))
6764 t
= fold_build_pointer_plus (n1
, t
);
6766 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6767 t
= fold_convert (TREE_TYPE (startvar
), t
);
6768 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6769 false, GSI_CONTINUE_LINKING
);
6772 assign_stmt
= gimple_build_assign (endvar
, e
);
6773 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
6774 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (e
)))
6775 assign_stmt
= gimple_build_assign (fd
->loop
.v
, e
);
6777 assign_stmt
= gimple_build_assign (fd
->loop
.v
, NOP_EXPR
, e
);
6778 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
6780 if (fd
->collapse
> 1)
6781 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
6785 /* The code controlling the sequential loop replaces the
6786 GIMPLE_OMP_CONTINUE. */
6787 gsi
= gsi_last_bb (cont_bb
);
6788 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
6789 gcc_assert (gimple_code (cont_stmt
) == GIMPLE_OMP_CONTINUE
);
6790 vmain
= gimple_omp_continue_control_use (cont_stmt
);
6791 vback
= gimple_omp_continue_control_def (cont_stmt
);
6793 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
6795 if (POINTER_TYPE_P (type
))
6796 t
= fold_build_pointer_plus (vmain
, step
);
6798 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, step
);
6799 t
= force_gimple_operand_gsi (&gsi
, t
,
6801 && TREE_ADDRESSABLE (vback
),
6802 NULL_TREE
, true, GSI_SAME_STMT
);
6803 assign_stmt
= gimple_build_assign (vback
, t
);
6804 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
6806 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
6807 DECL_P (vback
) && TREE_ADDRESSABLE (vback
)
6809 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
6812 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6813 gsi_remove (&gsi
, true);
6815 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
6816 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, body_bb
);
6819 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6820 gsi
= gsi_last_bb (exit_bb
);
6821 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
6823 t
= gimple_omp_return_lhs (gsi_stmt (gsi
));
6824 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_OACC_LOOP
)
6825 gcc_checking_assert (t
== NULL_TREE
);
6827 gsi_insert_after (&gsi
, build_omp_barrier (t
), GSI_SAME_STMT
);
6829 gsi_remove (&gsi
, true);
6831 /* Connect all the blocks. */
6832 ep
= make_edge (entry_bb
, third_bb
, EDGE_FALSE_VALUE
);
6833 ep
->probability
= REG_BR_PROB_BASE
/ 4 * 3;
6834 ep
= find_edge (entry_bb
, second_bb
);
6835 ep
->flags
= EDGE_TRUE_VALUE
;
6836 ep
->probability
= REG_BR_PROB_BASE
/ 4;
6837 find_edge (third_bb
, seq_start_bb
)->flags
= EDGE_FALSE_VALUE
;
6838 find_edge (third_bb
, fin_bb
)->flags
= EDGE_TRUE_VALUE
;
6842 ep
= find_edge (cont_bb
, body_bb
);
6843 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6848 else if (fd
->collapse
> 1)
6851 ep
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
6854 ep
->flags
= EDGE_TRUE_VALUE
;
6855 find_edge (cont_bb
, fin_bb
)->flags
6856 = ep
? EDGE_FALSE_VALUE
: EDGE_FALLTHRU
;
6859 set_immediate_dominator (CDI_DOMINATORS
, second_bb
, entry_bb
);
6860 set_immediate_dominator (CDI_DOMINATORS
, third_bb
, entry_bb
);
6861 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
, third_bb
);
6863 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
6864 recompute_dominator (CDI_DOMINATORS
, body_bb
));
6865 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
6866 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
6868 if (!broken_loop
&& !gimple_omp_for_combined_p (fd
->for_stmt
))
6870 struct loop
*loop
= alloc_loop ();
6871 loop
->header
= body_bb
;
6872 if (collapse_bb
== NULL
)
6873 loop
->latch
= cont_bb
;
6874 add_loop (loop
, body_bb
->loop_father
);
6879 /* A subroutine of expand_omp_for. Generate code for a parallel
6880 loop with static schedule and a specified chunk size. Given
6883 for (V = N1; V cond N2; V += STEP) BODY;
6885 where COND is "<" or ">", we generate pseudocode
6887 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6892 if ((__typeof (V)) -1 > 0 && cond is >)
6893 n = -(adj + N2 - N1) / -STEP;
6895 n = (adj + N2 - N1) / STEP;
6897 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6898 here so that V is defined
6899 if the loop is not entered
6901 s0 = (trip * nthreads + threadid) * CHUNK;
6902 e0 = min(s0 + CHUNK, n);
6903 if (s0 < n) goto L1; else goto L4;
6910 if (V cond e) goto L2; else goto L3;
/* Expand code for an OpenMP worksharing loop with static schedule and a
   specified CHUNK size (see the pseudocode comment preceding this
   function).  The GIMPLE_OMP_FOR region is rewritten into an explicit
   trip-counting loop that hands out chunks of CHUNK iterations
   round-robin across the threads:
     s0 = (trip * nthreads + threadid) * CHUNK; e0 = min (s0 + CHUNK, n).
   NOTE(review): this extract elides some original source lines (the
   embedded line numbers jump, e.g. 6933 -> 6936), so braces, `else`
   keywords and a few statements between the visible lines are missing
   here; the visible tokens are preserved unchanged.  */
6918 expand_omp_for_static_chunk (struct omp_region *region,
6919 struct omp_for_data *fd, gimple inner_stmt)
6921 tree n, s0, e0, e, t;
6922 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6923 tree type, itype, vmain, vback, vextra;
6924 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6925 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6926 gimple_stmt_iterator gsi;
6928 bool broken_loop = region->cont == NULL;
6929 tree *counts = NULL;
6932 gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt)
6933 != GF_OMP_FOR_KIND_OACC_LOOP)
/* ITYPE is the (signed, for pointer iterators) type used for the
   trip-count arithmetic; TYPE is the iteration variable's own type.  */
6936 itype = type = TREE_TYPE (fd->loop.v);
6937 if (POINTER_TYPE_P (type))
6938 itype = signed_type_for (type);
/* Carve the region into the basic blocks named in the pseudocode:
   entry, iteration-partitioning, sequential-start, body, trip-update,
   fin and exit blocks.  */
6940 entry_bb = region->entry;
6941 se = split_block (entry_bb, last_stmt (entry_bb));
6943 iter_part_bb = se->dest;
6944 cont_bb = region->cont;
6945 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6946 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6947 gcc_assert (broken_loop
6948 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6949 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6950 body_bb = single_succ (seq_start_bb);
6953 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6954 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6955 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6957 exit_bb = region->exit;
6959 /* Trip and adjustment setup goes in ENTRY_BB. */
6960 gsi = gsi_last_bb (entry_bb);
6961 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6963 if (fd->collapse > 1)
6965 int first_zero_iter = -1;
6966 basic_block l2_dom_bb = NULL;
6968 counts = XALLOCAVEC (tree, fd->collapse);
6969 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6970 fin_bb, first_zero_iter,
6974 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6975 t = integer_one_node;
/* Try to fold N1 cond N2 at compile time; when it cannot be shown
   nonzero for an unsigned single loop, emit a runtime guard that skips
   straight to FIN_BB for zero-iteration loops.  */
6977 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6978 fold_convert (type, fd->loop.n1),
6979 fold_convert (type, fd->loop.n2));
6980 if (fd->collapse == 1
6981 && TYPE_UNSIGNED (type)
6982 && (t == NULL_TREE || !integer_onep (t)))
6984 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6985 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6986 true, GSI_SAME_STMT);
6987 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6988 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6989 true, GSI_SAME_STMT);
6990 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6991 NULL_TREE, NULL_TREE)
6992 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6993 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6994 expand_omp_regimplify_p, NULL, NULL)
6995 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6996 expand_omp_regimplify_p, NULL, NULL))
6998 gsi = gsi_for_stmt (cond_stmt);
6999 gimple_regimplify_operands (cond_stmt, &gsi);
7001 se = split_block (entry_bb, cond_stmt);
7002 se->flags = EDGE_TRUE_VALUE;
7003 entry_bb = se->dest;
7004 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
7005 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
7006 se->probability = REG_BR_PROB_BASE / 2000 - 1;
7007 if (gimple_in_ssa_p (cfun))
/* In SSA form the new entry->fin edge needs matching PHI arguments,
   copied from the existing fin_bb incoming edge.  */
7009 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
7010 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
7011 !gsi_end_p (gpi); gsi_next (&gpi))
7013 gphi *phi = gpi.phi ();
7014 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
7015 se, UNKNOWN_LOCATION);
7018 gsi = gsi_last_bb (entry_bb);
/* Pick the "number of workers" / "my id" builtins matching the
   worksharing kind: threads for #pragma omp for, teams for distribute,
   GOACC threads for OpenACC loops.  */
7021 switch (gimple_omp_for_kind (fd->for_stmt))
7023 case GF_OMP_FOR_KIND_FOR:
7024 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
7025 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7027 case GF_OMP_FOR_KIND_DISTRIBUTE:
7028 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
7029 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
7031 case GF_OMP_FOR_KIND_OACC_LOOP:
7032 nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS);
7033 threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
7038 nthreads = build_call_expr (nthreads, 0);
7039 nthreads = fold_convert (itype, nthreads);
7040 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
7041 true, GSI_SAME_STMT);
7042 threadid = build_call_expr (threadid, 0);
7043 threadid = fold_convert (itype, threadid);
7044 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
7045 true, GSI_SAME_STMT);
/* For a loop combined into an enclosing construct, N1/N2 come from the
   _looptemp_ clauses created during lowering instead of fd->loop.  */
7049 step = fd->loop.step;
7050 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7052 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7053 OMP_CLAUSE__LOOPTEMP_);
7054 gcc_assert (innerc);
7055 n1 = OMP_CLAUSE_DECL (innerc);
7056 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7057 OMP_CLAUSE__LOOPTEMP_);
7058 gcc_assert (innerc);
7059 n2 = OMP_CLAUSE_DECL (innerc);
7061 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
7062 true, NULL_TREE, true, GSI_SAME_STMT);
7063 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
7064 true, NULL_TREE, true, GSI_SAME_STMT);
7065 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
7066 true, NULL_TREE, true, GSI_SAME_STMT);
7068 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
7069 true, NULL_TREE, true, GSI_SAME_STMT);
/* Total iteration count: n = (adj + n2 - n1) / step, where adj is
   step -/+ 1; negate both operands for an unsigned downward loop so the
   unsigned division is performed on nonnegative values.  */
7071 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
7072 t = fold_build2 (PLUS_EXPR, itype, step, t);
7073 t = fold_build2 (PLUS_EXPR, itype, t, n2);
7074 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
7075 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
7076 t = fold_build2 (TRUNC_DIV_EXPR, itype,
7077 fold_build1 (NEGATE_EXPR, itype, t),
7078 fold_build1 (NEGATE_EXPR, itype, step));
7080 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
7081 t = fold_convert (itype, t);
7082 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7083 true, GSI_SAME_STMT);
/* ".trip" counts the chunk rounds this thread has completed; it is
   incremented by 1 in TRIP_UPDATE_BB below.  In SSA form it needs three
   SSA names (init / loop header PHI result / latch value).  */
7085 trip_var = create_tmp_reg (itype, ".trip");
7086 if (gimple_in_ssa_p (cfun))
7088 trip_init = make_ssa_name (trip_var);
7089 trip_main = make_ssa_name (trip_var);
7090 trip_back = make_ssa_name (trip_var);
7094 trip_init = trip_var;
7095 trip_main = trip_var;
7096 trip_back = trip_var;
7099 gassign *assign_stmt
7100 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
7101 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
/* VEXTRA = threadid * chunk * step + n1: a definition of V usable even
   when the loop body is never entered (see pseudocode above).  */
7103 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
7104 t = fold_build2 (MULT_EXPR, itype, t, step);
7105 if (POINTER_TYPE_P (type))
7106 t = fold_build_pointer_plus (n1, t);
7108 t = fold_build2 (PLUS_EXPR, type, t, n1);
7109 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7110 true, GSI_SAME_STMT);
7112 /* Remove the GIMPLE_OMP_FOR. */
7113 gsi_remove (&gsi, true);
7115 /* Iteration space partitioning goes in ITER_PART_BB. */
7116 gsi = gsi_last_bb (iter_part_bb);
/* s0 = (trip * nthreads + threadid) * chunk; e0 = min (s0 + chunk, n);
   enter the sequential loop only when s0 < n.  */
7118 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
7119 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
7120 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
7121 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7122 false, GSI_CONTINUE_LINKING);
7124 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
7125 t = fold_build2 (MIN_EXPR, itype, t, n);
7126 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7127 false, GSI_CONTINUE_LINKING);
7129 t = build2 (LT_EXPR, boolean_type_node, s0, n);
7130 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
7132 /* Setup code for sequential iteration goes in SEQ_START_BB. */
7133 gsi = gsi_start_bb (seq_start_bb);
7135 tree startvar = fd->loop.v;
7136 tree endvar = NULL_TREE;
7138 if (gimple_omp_for_combined_p (fd->for_stmt))
/* Combined constructs pass start/end through _looptemp_ clauses of the
   inner parallel or for statement.  */
7140 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
7141 ? gimple_omp_parallel_clauses (inner_stmt)
7142 : gimple_omp_for_clauses (inner_stmt);
7143 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
7144 gcc_assert (innerc);
7145 startvar = OMP_CLAUSE_DECL (innerc);
7146 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7147 OMP_CLAUSE__LOOPTEMP_);
7148 gcc_assert (innerc);
7149 endvar = OMP_CLAUSE_DECL (innerc);
/* STARTVAR = s0 * step + n1; ENDVAR = e0 * step + n1.  */
7152 t = fold_convert (itype, s0);
7153 t = fold_build2 (MULT_EXPR, itype, t, step);
7154 if (POINTER_TYPE_P (type))
7155 t = fold_build_pointer_plus (n1, t);
7157 t = fold_build2 (PLUS_EXPR, type, t, n1);
7158 t = fold_convert (TREE_TYPE (startvar), t);
7159 t = force_gimple_operand_gsi (&gsi, t,
7161 && TREE_ADDRESSABLE (startvar),
7162 NULL_TREE, false, GSI_CONTINUE_LINKING);
7163 assign_stmt = gimple_build_assign (startvar, t);
7164 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7166 t = fold_convert (itype, e0);
7167 t = fold_build2 (MULT_EXPR, itype, t, step);
7168 if (POINTER_TYPE_P (type))
7169 t = fold_build_pointer_plus (n1, t);
7171 t = fold_build2 (PLUS_EXPR, type, t, n1);
7172 t = fold_convert (TREE_TYPE (startvar), t);
7173 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7174 false, GSI_CONTINUE_LINKING);
7177 assign_stmt = gimple_build_assign (endvar, e);
7178 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7179 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
7180 assign_stmt = gimple_build_assign (fd->loop.v, e);
7182 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
7183 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7185 if (fd->collapse > 1)
7186 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
7190 /* The code controlling the sequential loop goes in CONT_BB,
7191 replacing the GIMPLE_OMP_CONTINUE. */
7192 gsi = gsi_last_bb (cont_bb);
7193 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
7194 vmain = gimple_omp_continue_control_use (cont_stmt);
7195 vback = gimple_omp_continue_control_def (cont_stmt);
7197 if (!gimple_omp_for_combined_p (fd->for_stmt))
/* V += STEP; then loop back while V cond E.  */
7199 if (POINTER_TYPE_P (type))
7200 t = fold_build_pointer_plus (vmain, step);
7202 t = fold_build2 (PLUS_EXPR, type, vmain, step);
7203 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
7204 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7205 true, GSI_SAME_STMT);
7206 assign_stmt = gimple_build_assign (vback, t);
7207 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
7209 t = build2 (fd->loop.cond_code, boolean_type_node,
7210 DECL_P (vback) && TREE_ADDRESSABLE (vback)
7212 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
7215 /* Remove GIMPLE_OMP_CONTINUE. */
7216 gsi_remove (&gsi, true);
7218 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
7219 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
7221 /* Trip update code goes into TRIP_UPDATE_BB. */
7222 gsi = gsi_start_bb (trip_update_bb);
7224 t = build_int_cst (itype, 1);
7225 t = build2 (PLUS_EXPR, itype, trip_main, t);
7226 assign_stmt = gimple_build_assign (trip_back, t);
7227 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7230 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
7231 gsi = gsi_last_bb (exit_bb);
7232 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
7234 t = gimple_omp_return_lhs (gsi_stmt (gsi));
7235 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
7236 gcc_checking_assert (t == NULL_TREE);
7238 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
7240 gsi_remove (&gsi, true);
7242 /* Connect the new blocks. */
7243 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
7244 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
7248 se = find_edge (cont_bb, body_bb);
7249 if (gimple_omp_for_combined_p (fd->for_stmt))
7254 else if (fd->collapse > 1)
7257 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
7260 se->flags = EDGE_TRUE_VALUE;
7261 find_edge (cont_bb, trip_update_bb)->flags
7262 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
7264 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
7267 if (gimple_in_ssa_p (cfun))
/* SSA-form fixup: only supported for non-collapsed, non-broken loops.  */
7275 gcc_assert (fd->collapse == 1 && !broken_loop);
7277 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
7278 remove arguments of the phi nodes in fin_bb. We need to create
7279 appropriate phi nodes in iter_part_bb instead. */
7280 se = single_pred_edge (fin_bb);
7281 re = single_succ_edge (trip_update_bb);
7282 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
7283 ene = single_succ_edge (entry_bb);
7285 psi = gsi_start_phis (fin_bb);
7286 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
7287 gsi_next (&psi), ++i)
7290 source_location locus;
7293 t = gimple_phi_result (phi);
7294 gcc_assert (t == redirect_edge_var_map_result (vm));
7295 nphi = create_phi_node (t, iter_part_bb);
7297 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
7298 locus = gimple_phi_arg_location_from_edge (phi, se);
7300 /* A special case -- fd->loop.v is not yet computed in
7301 iter_part_bb, we need to use vextra instead. */
7302 if (t == fd->loop.v)
7304 add_phi_arg (nphi, t, ene, locus);
7305 locus = redirect_edge_var_map_location (vm);
7306 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
7308 gcc_assert (gsi_end_p (psi) && i == head->length ());
7309 redirect_edge_var_map_clear (re);
7312 psi = gsi_start_phis (fin_bb);
7313 if (gsi_end_p (psi))
7315 remove_phi_node (&psi, false);
7318 /* Make phi node for trip. */
7319 phi = create_phi_node (trip_main, iter_part_bb);
7320 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
7322 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
/* Recompute dominators for the blocks whose predecessors changed.  */
7327 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
7328 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
7329 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
7330 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
7331 recompute_dominator (CDI_DOMINATORS, fin_bb));
7332 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
7333 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
7334 set_immediate_dominator (CDI_DOMINATORS, body_bb,
7335 recompute_dominator (CDI_DOMINATORS, body_bb));
/* Register the outer trip loop and, unless combined, the inner
   sequential loop in the loop tree.  */
7339 struct loop *trip_loop = alloc_loop ();
7340 trip_loop->header = iter_part_bb;
7341 trip_loop->latch = trip_update_bb;
7342 add_loop (trip_loop, iter_part_bb->loop_father);
7344 if (!gimple_omp_for_combined_p (fd->for_stmt))
7346 struct loop *loop = alloc_loop ();
7347 loop->header = body_bb;
7348 if (collapse_bb == NULL)
7349 loop->latch = cont_bb;
7350 add_loop (loop, trip_loop);
7355 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
7357 for (V = N1; V cond N2; V += STEP) BODY;
7359 where COND is "<" or ">" or "!=", we generate pseudocode
7361 for (ind_var = low; ind_var < high; ind_var++)
7363 V = n1 + (ind_var * STEP)
7368 In the above pseudocode, low and high are function parameters of the
7369 child function. In the function below, we are inserting a temp.
7370 variable that will be making a call to two OMP functions that will not be
7371 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
7372 with _Cilk_for). These functions are replaced with low and high
7373 by the function that handles taskreg. */
/* Expand a _Cilk_for loop region (see the pseudocode comment preceding
   this function): rewrite the GIMPLE_OMP_FOR into a plain counted loop
   over a temporary __cilk_ind_var running from __low to __high (the
   parameters of the outlined child function), and record in
   region->ws_args which __libcilkrts_cilk_for_* flavor to call plus the
   user-supplied grain size.
   NOTE(review): this extract elides some original source lines (the
   embedded line numbers jump), so a few statements and braces between
   the visible lines are missing; visible tokens are unchanged.  */
7377 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
7379 bool broken_loop = region->cont == NULL;
7380 basic_block entry_bb = region->entry;
7381 basic_block cont_bb = region->cont;
7383 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7384 gcc_assert (broken_loop
7385 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7386 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7387 basic_block l1_bb, l2_bb;
7391 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7392 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7393 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7394 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7398 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7399 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7400 l2_bb = single_succ (l1_bb);
7402 basic_block exit_bb = region->exit;
7403 basic_block l2_dom_bb = NULL;
7405 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
7407 /* Below statements until the "tree high_val = ..." are pseudo statements
7408 used to pass information to be used by expand_omp_taskreg.
7409 low_val and high_val will be replaced by the __low and __high
7410 parameter from the child function.
7412 The call_exprs part is a place-holder, it is mainly used
7413 to distinctly identify to the top-level part that this is
7414 where we should put low and high (reasoning given in header
7418 = gimple_omp_parallel_child_fn (
7419 as_a <gomp_parallel *> (last_stmt (region->outer->entry)));
7420 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
/* Locate the __low / __high arguments of the outlined child function
   by name among its DECL_ARGUMENTS.  */
7421 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
7423 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
7425 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
7428 gcc_assert (low_val && high_val);
7430 tree type = TREE_TYPE (low_val);
7431 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
7432 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7434 /* Not needed in SSA form right now. */
7435 gcc_assert (!gimple_in_ssa_p (cfun));
7436 if (l2_dom_bb == NULL)
/* ind_var = n1 replaces the GIMPLE_OMP_FOR itself.  */
7442 gimple stmt = gimple_build_assign (ind_var, n1);
7444 /* Replace the GIMPLE_OMP_FOR statement. */
7445 gsi_replace (&gsi, stmt, true);
7449 /* Code to control the increment goes in the CONT_BB. */
7450 gsi = gsi_last_bb (cont_bb);
7451 stmt = gsi_stmt (gsi);
7452 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7453 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
7454 build_one_cst (type));
7456 /* Replace GIMPLE_OMP_CONTINUE. */
7457 gsi_replace (&gsi, stmt, true);
7460 /* Emit the condition in L1_BB. */
7461 gsi = gsi_after_labels (l1_bb);
/* Recompute the user's iteration variable from the induction variable:
   V = n1 + ind_var * STEP (pointer-plus for pointer iterators).  */
7462 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
7463 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
7465 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
7466 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7467 fd->loop.n1, fold_convert (sizetype, t));
7469 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7470 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
7471 t = fold_convert (TREE_TYPE (fd->loop.v), t);
7472 expand_omp_build_assign (&gsi, fd->loop.v, t);
7474 /* The condition is always '<' since the runtime will fill in the low
7476 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
7477 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
7479 /* Remove GIMPLE_OMP_RETURN. */
7480 gsi = gsi_last_bb (exit_bb);
7481 gsi_remove (&gsi, true);
7483 /* Connect the new blocks. */
7484 remove_edge (FALLTHRU_EDGE (entry_bb));
7489 remove_edge (BRANCH_EDGE (entry_bb));
7490 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7492 e = BRANCH_EDGE (l1_bb);
7493 ne = FALLTHRU_EDGE (l1_bb);
7494 e->flags = EDGE_TRUE_VALUE;
7498 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7500 ne = single_succ_edge (l1_bb);
7501 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
/* Heavily favor the loop-taken edge (7/8 vs 1/8).  */
7504 ne->flags = EDGE_FALSE_VALUE;
7505 e->probability = REG_BR_PROB_BASE * 7 / 8;
7506 ne->probability = REG_BR_PROB_BASE / 8;
7508 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7509 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7510 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
/* Register the loop; safelen = INT_MAX marks it free of loop-carried
   dependences up to any length.  */
7514 struct loop *loop = alloc_loop ();
7515 loop->header = l1_bb;
7516 loop->latch = cont_bb;
7517 add_loop (loop, l1_bb->loop_father);
7518 loop->safelen = INT_MAX;
7521 /* Pick the correct library function based on the precision of the
7522 induction variable type. */
7523 tree lib_fun = NULL_TREE;
7524 if (TYPE_PRECISION (type) == 32)
7525 lib_fun = cilk_for_32_fndecl;
7526 else if (TYPE_PRECISION (type) == 64)
7527 lib_fun = cilk_for_64_fndecl;
7531 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
7533 /* WS_ARGS contains the library function flavor to call:
7534 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
7535 user-defined grain value. If the user does not define one, then zero
7536 is passed in by the parser. */
7537 vec_alloc (region->ws_args, 2);
7538 region->ws_args->quick_push (lib_fun);
7539 region->ws_args->quick_push (fd->chunk_size);
7542 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7543 loop. Given parameters:
7545 for (V = N1; V cond N2; V += STEP) BODY;
7547 where COND is "<" or ">", we generate pseudocode
7555 if (V cond N2) goto L0; else goto L2;
7558 For collapsed loops, given parameters:
7560 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7561 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7562 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7565 we generate pseudocode
7571 count3 = (adj + N32 - N31) / STEP3;
7576 count2 = (adj + N22 - N21) / STEP2;
7581 count1 = (adj + N12 - N11) / STEP1;
7582 count = count1 * count2 * count3;
7592 V2 += (V3 cond3 N32) ? 0 : STEP2;
7593 V3 = (V3 cond3 N32) ? V3 : N31;
7594 V1 += (V2 cond2 N22) ? 0 : STEP1;
7595 V2 = (V2 cond2 N22) ? V2 : N21;
7597 if (V < count) goto L0; else goto L2;
/* Expand a simd (non-worksharing) OMP loop (see the pseudocode comment
   preceding this function): emit the induction-variable initializations,
   updates and latch condition directly, then annotate the resulting
   struct loop with safelen / simduid / force_vectorize hints consumed
   later by the vectorizer.
   NOTE(review): this extract elides some original source lines (the
   embedded line numbers jump), so some statements and braces between the
   visible lines are missing; visible tokens are unchanged.  */
7603 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7606 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7607 gimple_stmt_iterator gsi;
7610 bool broken_loop = region->cont == NULL;
7612 tree *counts = NULL;
/* The safelen and _simduid_ clauses, if present, drive the loop
   annotations emitted at the end of this function.  */
7614 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7615 OMP_CLAUSE_SAFELEN);
7616 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7617 OMP_CLAUSE__SIMDUID_);
7620 type = TREE_TYPE (fd->loop.v);
7621 entry_bb = region->entry;
7622 cont_bb = region->cont;
7623 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7624 gcc_assert (broken_loop
7625 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7626 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7629 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7630 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7631 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7632 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7636 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7637 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7638 l2_bb = single_succ (l1_bb);
7640 exit_bb = region->exit;
7643 gsi = gsi_last_bb (entry_bb);
7645 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7646 /* Not needed in SSA form right now. */
7647 gcc_assert (!gimple_in_ssa_p (cfun));
7648 if (fd->collapse > 1)
7650 int first_zero_iter = -1;
7651 basic_block zero_iter_bb = l2_bb;
7653 counts = XALLOCAVEC (tree, fd->collapse);
7654 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7655 zero_iter_bb, first_zero_iter,
7658 if (l2_dom_bb == NULL)
/* A loop combined into an enclosing construct takes N1/N2 from the
   _looptemp_ clauses; otherwise initialize V (and for collapsed loops
   each inner iteration variable) from the clause-recorded n1 values.  */
7663 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7665 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7666 OMP_CLAUSE__LOOPTEMP_);
7667 gcc_assert (innerc);
7668 n1 = OMP_CLAUSE_DECL (innerc);
7669 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7670 OMP_CLAUSE__LOOPTEMP_);
7671 gcc_assert (innerc);
7672 n2 = OMP_CLAUSE_DECL (innerc);
7673 expand_omp_build_assign (&gsi, fd->loop.v,
7674 fold_convert (type, n1));
7675 if (fd->collapse > 1)
7678 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7684 expand_omp_build_assign (&gsi, fd->loop.v,
7685 fold_convert (type, fd->loop.n1));
7686 if (fd->collapse > 1)
7687 for (i = 0; i < fd->collapse; i++)
7689 tree itype = TREE_TYPE (fd->loops[i].v);
7690 if (POINTER_TYPE_P (itype))
7691 itype = signed_type_for (itype);
7692 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7693 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7697 /* Remove the GIMPLE_OMP_FOR statement. */
7698 gsi_remove (&gsi, true);
7702 /* Code to control the increment goes in the CONT_BB. */
7703 gsi = gsi_last_bb (cont_bb);
7704 stmt = gsi_stmt (gsi);
7705 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7707 if (POINTER_TYPE_P (type))
7708 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7710 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7711 expand_omp_build_assign (&gsi, fd->loop.v, t);
7713 if (fd->collapse > 1)
/* Step the innermost collapsed iteration variable each iteration.  */
7715 i = fd->collapse - 1;
7716 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7718 t = fold_convert (sizetype, fd->loops[i].step);
7719 t = fold_build_pointer_plus (fd->loops[i].v, t);
7723 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7725 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7728 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
/* Wrap the outer collapsed variables: when loop i has finished
   (its cond against n2 fails), step loop i-1 and reset loop i to n1,
   expressed branchlessly via COND_EXPRs (see pseudocode L11-L14).  */
7730 for (i = fd->collapse - 1; i > 0; i--)
7732 tree itype = TREE_TYPE (fd->loops[i].v);
7733 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7734 if (POINTER_TYPE_P (itype2))
7735 itype2 = signed_type_for (itype2);
7736 t = build3 (COND_EXPR, itype2,
7737 build2 (fd->loops[i].cond_code, boolean_type_node,
7739 fold_convert (itype, fd->loops[i].n2)),
7740 build_int_cst (itype2, 0),
7741 fold_convert (itype2, fd->loops[i - 1].step));
7742 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7743 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7745 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7746 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7748 t = build3 (COND_EXPR, itype,
7749 build2 (fd->loops[i].cond_code, boolean_type_node,
7751 fold_convert (itype, fd->loops[i].n2)),
7753 fold_convert (itype, fd->loops[i].n1));
7754 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7758 /* Remove GIMPLE_OMP_CONTINUE. */
7759 gsi_remove (&gsi, true);
7762 /* Emit the condition in L1_BB. */
7763 gsi = gsi_start_bb (l1_bb);
7765 t = fold_convert (type, n2);
7766 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7767 false, GSI_CONTINUE_LINKING);
7768 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7769 cond_stmt = gimple_build_cond_empty (t);
7770 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
7771 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
7773 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
7776 gsi = gsi_for_stmt (cond_stmt);
7777 gimple_regimplify_operands (cond_stmt, &gsi);
7780 /* Remove GIMPLE_OMP_RETURN. */
7781 gsi = gsi_last_bb (exit_bb);
7782 gsi_remove (&gsi, true);
7784 /* Connect the new blocks. */
7785 remove_edge (FALLTHRU_EDGE (entry_bb));
7789 remove_edge (BRANCH_EDGE (entry_bb));
7790 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7792 e = BRANCH_EDGE (l1_bb);
7793 ne = FALLTHRU_EDGE (l1_bb);
7794 e->flags = EDGE_TRUE_VALUE;
7798 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7800 ne = single_succ_edge (l1_bb);
7801 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
/* Heavily favor the loop-taken edge (7/8 vs 1/8).  */
7804 ne->flags = EDGE_FALSE_VALUE;
7805 e->probability = REG_BR_PROB_BASE * 7 / 8;
7806 ne->probability = REG_BR_PROB_BASE / 8;
7808 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7809 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7810 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
/* Register the loop and translate the simd clauses into vectorizer
   hints: safelen (clamped to INT_MAX), simduid, and force_vectorize
   unless vectorization is explicitly disabled.  */
7814 struct loop *loop = alloc_loop ();
7815 loop->header = l1_bb;
7816 loop->latch = cont_bb;
7817 add_loop (loop, l1_bb->loop_father);
7818 if (safelen == NULL_TREE)
7819 loop->safelen = INT_MAX;
7822 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7823 if (TREE_CODE (safelen) != INTEGER_CST)
7825 else if (!tree_fits_uhwi_p (safelen)
7826 || tree_to_uhwi (safelen) > INT_MAX)
7827 loop->safelen = INT_MAX;
7829 loop->safelen = tree_to_uhwi (safelen);
7830 if (loop->safelen == 1)
7835 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7836 cfun->has_simduid_loops = true;
7838 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7840 if ((flag_tree_loop_vectorize
7841 || (!global_options_set.x_flag_tree_loop_vectorize
7842 && !global_options_set.x_flag_tree_vectorize))
7843 && flag_tree_loop_optimize
7844 && loop->safelen > 1)
7846 loop->force_vectorize = true;
7847 cfun->has_force_vectorize_loops = true;
7853 /* Expand the OMP loop defined by REGION. */
/* Expand the OMP loop defined by REGION: extract the loop descriptor
   from the GIMPLE_OMP_FOR at the region entry, clear abnormal-edge
   flags left over from lowering, then dispatch to the matching
   expander — simd, _Cilk_for, static nochunk/chunk, or the generic
   libgomp GOMP_loop_*_start / GOMP_loop_*_next protocol.
   NOTE(review): this extract elides a few original source lines (the
   embedded line numbers jump); visible tokens are unchanged.  */
7856 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7858 struct omp_for_data fd;
7859 struct omp_for_data_loop *loops;
/* One omp_for_data_loop per collapsed nest level, on the stack.  */
7862 = (struct omp_for_data_loop *)
7863 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7864 * sizeof (struct omp_for_data_loop));
7865 extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
7867 region->sched_kind = fd.sched_kind;
/* Lowering marked the region edges abnormal to keep them intact;
   drop that now so normal CFG transformations apply.  */
7869 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7870 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7871 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7874 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7875 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7876 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7879 /* If there isn't a continue then this is a degenerate case where
7880 the introduction of abnormal edges during lowering will prevent
7881 original loops from being detected. Fix that up. */
7882 loops_state_set (LOOPS_NEED_FIXUP);
7884 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7885 expand_omp_simd (region, &fd);
7886 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7887 expand_cilk_for (region, &fd);
7888 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7889 && !fd.have_ordered)
7891 if (fd.chunk_size == NULL)
7892 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7894 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7898 int fn_index, start_ix, next_ix;
7900 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7901 == GF_OMP_FOR_KIND_FOR);
7902 if (fd.chunk_size == NULL
7903 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7904 fd.chunk_size = integer_zero_node;
7905 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
/* Map the schedule kind (runtime -> index 3) plus an ordered offset of
   4 onto the GOMP_loop_*_start / GOMP_loop_*_next builtin pair,
   shifting to the _ull variants for unsigned long long iterators.  */
7906 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7907 ? 3 : fd.sched_kind;
7908 fn_index += fd.have_ordered * 4;
7909 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7910 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
7911 if (fd.iter_type == long_long_unsigned_type_node)
7913 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7914 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7915 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7916 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7918 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7919 (enum built_in_function) next_ix, inner_stmt);
7922 if (gimple_in_ssa_p (cfun))
7923 update_ssa (TODO_update_ssa_only_virtuals);
7927 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7929 v = GOMP_sections_start (n);
7946 v = GOMP_sections_next ();
7951 If this is a combined parallel sections, replace the call to
7952 GOMP_sections_start with call to GOMP_sections_next. */
7955 expand_omp_sections (struct omp_region
*region
)
7957 tree t
, u
, vin
= NULL
, vmain
, vnext
, l2
;
7959 basic_block entry_bb
, l0_bb
, l1_bb
, l2_bb
, default_bb
;
7960 gimple_stmt_iterator si
, switch_si
;
7961 gomp_sections
*sections_stmt
;
7963 gomp_continue
*cont
;
7966 struct omp_region
*inner
;
7968 bool exit_reachable
= region
->cont
!= NULL
;
7970 gcc_assert (region
->exit
!= NULL
);
7971 entry_bb
= region
->entry
;
7972 l0_bb
= single_succ (entry_bb
);
7973 l1_bb
= region
->cont
;
7974 l2_bb
= region
->exit
;
7975 if (single_pred_p (l2_bb
) && single_pred (l2_bb
) == l0_bb
)
7976 l2
= gimple_block_label (l2_bb
);
7979 /* This can happen if there are reductions. */
7980 len
= EDGE_COUNT (l0_bb
->succs
);
7981 gcc_assert (len
> 0);
7982 e
= EDGE_SUCC (l0_bb
, len
- 1);
7983 si
= gsi_last_bb (e
->dest
);
7986 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
7987 l2
= gimple_block_label (e
->dest
);
7989 FOR_EACH_EDGE (e
, ei
, l0_bb
->succs
)
7991 si
= gsi_last_bb (e
->dest
);
7993 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
7995 l2
= gimple_block_label (e
->dest
);
8001 default_bb
= create_empty_bb (l1_bb
->prev_bb
);
8003 default_bb
= create_empty_bb (l0_bb
);
8005 /* We will build a switch() with enough cases for all the
8006 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
8007 and a default case to abort if something goes wrong. */
8008 len
= EDGE_COUNT (l0_bb
->succs
);
8010 /* Use vec::quick_push on label_vec throughout, since we know the size
8012 auto_vec
<tree
> label_vec (len
);
8014 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
8015 GIMPLE_OMP_SECTIONS statement. */
8016 si
= gsi_last_bb (entry_bb
);
8017 sections_stmt
= as_a
<gomp_sections
*> (gsi_stmt (si
));
8018 gcc_assert (gimple_code (sections_stmt
) == GIMPLE_OMP_SECTIONS
);
8019 vin
= gimple_omp_sections_control (sections_stmt
);
8020 if (!is_combined_parallel (region
))
8022 /* If we are not inside a combined parallel+sections region,
8023 call GOMP_sections_start. */
8024 t
= build_int_cst (unsigned_type_node
, len
- 1);
8025 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START
);
8026 stmt
= gimple_build_call (u
, 1, t
);
8030 /* Otherwise, call GOMP_sections_next. */
8031 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
8032 stmt
= gimple_build_call (u
, 0);
8034 gimple_call_set_lhs (stmt
, vin
);
8035 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
8036 gsi_remove (&si
, true);
8038 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
8040 switch_si
= gsi_last_bb (l0_bb
);
8041 gcc_assert (gimple_code (gsi_stmt (switch_si
)) == GIMPLE_OMP_SECTIONS_SWITCH
);
8044 cont
= as_a
<gomp_continue
*> (last_stmt (l1_bb
));
8045 gcc_assert (gimple_code (cont
) == GIMPLE_OMP_CONTINUE
);
8046 vmain
= gimple_omp_continue_control_use (cont
);
8047 vnext
= gimple_omp_continue_control_def (cont
);
8055 t
= build_case_label (build_int_cst (unsigned_type_node
, 0), NULL
, l2
);
8056 label_vec
.quick_push (t
);
8059 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
8060 for (inner
= region
->inner
, casei
= 1;
8062 inner
= inner
->next
, i
++, casei
++)
8064 basic_block s_entry_bb
, s_exit_bb
;
8066 /* Skip optional reduction region. */
8067 if (inner
->type
== GIMPLE_OMP_ATOMIC_LOAD
)
8074 s_entry_bb
= inner
->entry
;
8075 s_exit_bb
= inner
->exit
;
8077 t
= gimple_block_label (s_entry_bb
);
8078 u
= build_int_cst (unsigned_type_node
, casei
);
8079 u
= build_case_label (u
, NULL
, t
);
8080 label_vec
.quick_push (u
);
8082 si
= gsi_last_bb (s_entry_bb
);
8083 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SECTION
);
8084 gcc_assert (i
< len
|| gimple_omp_section_last_p (gsi_stmt (si
)));
8085 gsi_remove (&si
, true);
8086 single_succ_edge (s_entry_bb
)->flags
= EDGE_FALLTHRU
;
8088 if (s_exit_bb
== NULL
)
8091 si
= gsi_last_bb (s_exit_bb
);
8092 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
8093 gsi_remove (&si
, true);
8095 single_succ_edge (s_exit_bb
)->flags
= EDGE_FALLTHRU
;
8098 /* Error handling code goes in DEFAULT_BB. */
8099 t
= gimple_block_label (default_bb
);
8100 u
= build_case_label (NULL
, NULL
, t
);
8101 make_edge (l0_bb
, default_bb
, 0);
8102 add_bb_to_loop (default_bb
, current_loops
->tree_root
);
8104 stmt
= gimple_build_switch (vmain
, u
, label_vec
);
8105 gsi_insert_after (&switch_si
, stmt
, GSI_SAME_STMT
);
8106 gsi_remove (&switch_si
, true);
8108 si
= gsi_start_bb (default_bb
);
8109 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP
), 0);
8110 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
8116 /* Code to get the next section goes in L1_BB. */
8117 si
= gsi_last_bb (l1_bb
);
8118 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CONTINUE
);
8120 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
8121 stmt
= gimple_build_call (bfn_decl
, 0);
8122 gimple_call_set_lhs (stmt
, vnext
);
8123 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
8124 gsi_remove (&si
, true);
8126 single_succ_edge (l1_bb
)->flags
= EDGE_FALLTHRU
;
8129 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
8130 si
= gsi_last_bb (l2_bb
);
8131 if (gimple_omp_return_nowait_p (gsi_stmt (si
)))
8132 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT
);
8133 else if (gimple_omp_return_lhs (gsi_stmt (si
)))
8134 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL
);
8136 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END
);
8137 stmt
= gimple_build_call (t
, 0);
8138 if (gimple_omp_return_lhs (gsi_stmt (si
)))
8139 gimple_call_set_lhs (stmt
, gimple_omp_return_lhs (gsi_stmt (si
)));
8140 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
8141 gsi_remove (&si
, true);
8143 set_immediate_dominator (CDI_DOMINATORS
, default_bb
, l0_bb
);
8147 /* Expand code for an OpenMP single directive. We've already expanded
8148 much of the code, here we simply place the GOMP_barrier call. */
8151 expand_omp_single (struct omp_region
*region
)
8153 basic_block entry_bb
, exit_bb
;
8154 gimple_stmt_iterator si
;
8156 entry_bb
= region
->entry
;
8157 exit_bb
= region
->exit
;
8159 si
= gsi_last_bb (entry_bb
);
8160 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SINGLE
);
8161 gsi_remove (&si
, true);
8162 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
8164 si
= gsi_last_bb (exit_bb
);
8165 if (!gimple_omp_return_nowait_p (gsi_stmt (si
)))
8167 tree t
= gimple_omp_return_lhs (gsi_stmt (si
));
8168 gsi_insert_after (&si
, build_omp_barrier (t
), GSI_SAME_STMT
);
8170 gsi_remove (&si
, true);
8171 single_succ_edge (exit_bb
)->flags
= EDGE_FALLTHRU
;
8175 /* Generic expansion for OpenMP synchronization directives: master,
8176 ordered and critical. All we need to do here is remove the entry
8177 and exit markers for REGION. */
8180 expand_omp_synch (struct omp_region
*region
)
8182 basic_block entry_bb
, exit_bb
;
8183 gimple_stmt_iterator si
;
8185 entry_bb
= region
->entry
;
8186 exit_bb
= region
->exit
;
8188 si
= gsi_last_bb (entry_bb
);
8189 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SINGLE
8190 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_MASTER
8191 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_TASKGROUP
8192 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ORDERED
8193 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CRITICAL
8194 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_TEAMS
);
8195 gsi_remove (&si
, true);
8196 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
8200 si
= gsi_last_bb (exit_bb
);
8201 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
8202 gsi_remove (&si
, true);
8203 single_succ_edge (exit_bb
)->flags
= EDGE_FALLTHRU
;
8207 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8208 operation as a normal volatile load. */
8211 expand_omp_atomic_load (basic_block load_bb
, tree addr
,
8212 tree loaded_val
, int index
)
8214 enum built_in_function tmpbase
;
8215 gimple_stmt_iterator gsi
;
8216 basic_block store_bb
;
8219 tree decl
, call
, type
, itype
;
8221 gsi
= gsi_last_bb (load_bb
);
8222 stmt
= gsi_stmt (gsi
);
8223 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
8224 loc
= gimple_location (stmt
);
8226 /* ??? If the target does not implement atomic_load_optab[mode], and mode
8227 is smaller than word size, then expand_atomic_load assumes that the load
8228 is atomic. We could avoid the builtin entirely in this case. */
8230 tmpbase
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
8231 decl
= builtin_decl_explicit (tmpbase
);
8232 if (decl
== NULL_TREE
)
8235 type
= TREE_TYPE (loaded_val
);
8236 itype
= TREE_TYPE (TREE_TYPE (decl
));
8238 call
= build_call_expr_loc (loc
, decl
, 2, addr
,
8239 build_int_cst (NULL
,
8240 gimple_omp_atomic_seq_cst_p (stmt
)
8242 : MEMMODEL_RELAXED
));
8243 if (!useless_type_conversion_p (type
, itype
))
8244 call
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
8245 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
8247 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
8248 gsi_remove (&gsi
, true);
8250 store_bb
= single_succ (load_bb
);
8251 gsi
= gsi_last_bb (store_bb
);
8252 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
8253 gsi_remove (&gsi
, true);
8255 if (gimple_in_ssa_p (cfun
))
8256 update_ssa (TODO_update_ssa_no_phi
);
8261 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8262 operation as a normal volatile store. */
8265 expand_omp_atomic_store (basic_block load_bb
, tree addr
,
8266 tree loaded_val
, tree stored_val
, int index
)
8268 enum built_in_function tmpbase
;
8269 gimple_stmt_iterator gsi
;
8270 basic_block store_bb
= single_succ (load_bb
);
8273 tree decl
, call
, type
, itype
;
8277 gsi
= gsi_last_bb (load_bb
);
8278 stmt
= gsi_stmt (gsi
);
8279 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
8281 /* If the load value is needed, then this isn't a store but an exchange. */
8282 exchange
= gimple_omp_atomic_need_value_p (stmt
);
8284 gsi
= gsi_last_bb (store_bb
);
8285 stmt
= gsi_stmt (gsi
);
8286 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_STORE
);
8287 loc
= gimple_location (stmt
);
8289 /* ??? If the target does not implement atomic_store_optab[mode], and mode
8290 is smaller than word size, then expand_atomic_store assumes that the store
8291 is atomic. We could avoid the builtin entirely in this case. */
8293 tmpbase
= (exchange
? BUILT_IN_ATOMIC_EXCHANGE_N
: BUILT_IN_ATOMIC_STORE_N
);
8294 tmpbase
= (enum built_in_function
) ((int) tmpbase
+ index
+ 1);
8295 decl
= builtin_decl_explicit (tmpbase
);
8296 if (decl
== NULL_TREE
)
8299 type
= TREE_TYPE (stored_val
);
8301 /* Dig out the type of the function's second argument. */
8302 itype
= TREE_TYPE (decl
);
8303 itype
= TYPE_ARG_TYPES (itype
);
8304 itype
= TREE_CHAIN (itype
);
8305 itype
= TREE_VALUE (itype
);
8306 imode
= TYPE_MODE (itype
);
8308 if (exchange
&& !can_atomic_exchange_p (imode
, true))
8311 if (!useless_type_conversion_p (itype
, type
))
8312 stored_val
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, itype
, stored_val
);
8313 call
= build_call_expr_loc (loc
, decl
, 3, addr
, stored_val
,
8314 build_int_cst (NULL
,
8315 gimple_omp_atomic_seq_cst_p (stmt
)
8317 : MEMMODEL_RELAXED
));
8320 if (!useless_type_conversion_p (type
, itype
))
8321 call
= build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
8322 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
8325 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
8326 gsi_remove (&gsi
, true);
8328 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
8329 gsi
= gsi_last_bb (load_bb
);
8330 gsi_remove (&gsi
, true);
8332 if (gimple_in_ssa_p (cfun
))
8333 update_ssa (TODO_update_ssa_no_phi
);
8338 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8339 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
8340 size of the data type, and thus usable to find the index of the builtin
8341 decl. Returns false if the expression is not of the proper form. */
8344 expand_omp_atomic_fetch_op (basic_block load_bb
,
8345 tree addr
, tree loaded_val
,
8346 tree stored_val
, int index
)
8348 enum built_in_function oldbase
, newbase
, tmpbase
;
8349 tree decl
, itype
, call
;
8351 basic_block store_bb
= single_succ (load_bb
);
8352 gimple_stmt_iterator gsi
;
8355 enum tree_code code
;
8356 bool need_old
, need_new
;
8360 /* We expect to find the following sequences:
8363 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
8366 val = tmp OP something; (or: something OP tmp)
8367 GIMPLE_OMP_STORE (val)
8369 ???FIXME: Allow a more flexible sequence.
8370 Perhaps use data flow to pick the statements.
8374 gsi
= gsi_after_labels (store_bb
);
8375 stmt
= gsi_stmt (gsi
);
8376 loc
= gimple_location (stmt
);
8377 if (!is_gimple_assign (stmt
))
8380 if (gimple_code (gsi_stmt (gsi
)) != GIMPLE_OMP_ATOMIC_STORE
)
8382 need_new
= gimple_omp_atomic_need_value_p (gsi_stmt (gsi
));
8383 need_old
= gimple_omp_atomic_need_value_p (last_stmt (load_bb
));
8384 seq_cst
= gimple_omp_atomic_seq_cst_p (last_stmt (load_bb
));
8385 gcc_checking_assert (!need_old
|| !need_new
);
8387 if (!operand_equal_p (gimple_assign_lhs (stmt
), stored_val
, 0))
8390 /* Check for one of the supported fetch-op operations. */
8391 code
= gimple_assign_rhs_code (stmt
);
8395 case POINTER_PLUS_EXPR
:
8396 oldbase
= BUILT_IN_ATOMIC_FETCH_ADD_N
;
8397 newbase
= BUILT_IN_ATOMIC_ADD_FETCH_N
;
8400 oldbase
= BUILT_IN_ATOMIC_FETCH_SUB_N
;
8401 newbase
= BUILT_IN_ATOMIC_SUB_FETCH_N
;
8404 oldbase
= BUILT_IN_ATOMIC_FETCH_AND_N
;
8405 newbase
= BUILT_IN_ATOMIC_AND_FETCH_N
;
8408 oldbase
= BUILT_IN_ATOMIC_FETCH_OR_N
;
8409 newbase
= BUILT_IN_ATOMIC_OR_FETCH_N
;
8412 oldbase
= BUILT_IN_ATOMIC_FETCH_XOR_N
;
8413 newbase
= BUILT_IN_ATOMIC_XOR_FETCH_N
;
8419 /* Make sure the expression is of the proper form. */
8420 if (operand_equal_p (gimple_assign_rhs1 (stmt
), loaded_val
, 0))
8421 rhs
= gimple_assign_rhs2 (stmt
);
8422 else if (commutative_tree_code (gimple_assign_rhs_code (stmt
))
8423 && operand_equal_p (gimple_assign_rhs2 (stmt
), loaded_val
, 0))
8424 rhs
= gimple_assign_rhs1 (stmt
);
8428 tmpbase
= ((enum built_in_function
)
8429 ((need_new
? newbase
: oldbase
) + index
+ 1));
8430 decl
= builtin_decl_explicit (tmpbase
);
8431 if (decl
== NULL_TREE
)
8433 itype
= TREE_TYPE (TREE_TYPE (decl
));
8434 imode
= TYPE_MODE (itype
);
8436 /* We could test all of the various optabs involved, but the fact of the
8437 matter is that (with the exception of i486 vs i586 and xadd) all targets
8438 that support any atomic operaton optab also implements compare-and-swap.
8439 Let optabs.c take care of expanding any compare-and-swap loop. */
8440 if (!can_compare_and_swap_p (imode
, true))
8443 gsi
= gsi_last_bb (load_bb
);
8444 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_LOAD
);
8446 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
8447 It only requires that the operation happen atomically. Thus we can
8448 use the RELAXED memory model. */
8449 call
= build_call_expr_loc (loc
, decl
, 3, addr
,
8450 fold_convert_loc (loc
, itype
, rhs
),
8451 build_int_cst (NULL
,
8452 seq_cst
? MEMMODEL_SEQ_CST
8453 : MEMMODEL_RELAXED
));
8455 if (need_old
|| need_new
)
8457 lhs
= need_old
? loaded_val
: stored_val
;
8458 call
= fold_convert_loc (loc
, TREE_TYPE (lhs
), call
);
8459 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, lhs
, call
);
8462 call
= fold_convert_loc (loc
, void_type_node
, call
);
8463 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
8464 gsi_remove (&gsi
, true);
8466 gsi
= gsi_last_bb (store_bb
);
8467 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
8468 gsi_remove (&gsi
, true);
8469 gsi
= gsi_last_bb (store_bb
);
8470 gsi_remove (&gsi
, true);
8472 if (gimple_in_ssa_p (cfun
))
8473 update_ssa (TODO_update_ssa_no_phi
);
8478 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8482 newval = rhs; // with oldval replacing *addr in rhs
8483 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
8484 if (oldval != newval)
8487 INDEX is log2 of the size of the data type, and thus usable to find the
8488 index of the builtin decl. */
8491 expand_omp_atomic_pipeline (basic_block load_bb
, basic_block store_bb
,
8492 tree addr
, tree loaded_val
, tree stored_val
,
8495 tree loadedi
, storedi
, initial
, new_storedi
, old_vali
;
8496 tree type
, itype
, cmpxchg
, iaddr
;
8497 gimple_stmt_iterator si
;
8498 basic_block loop_header
= single_succ (load_bb
);
8501 enum built_in_function fncode
;
8503 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
8504 order to use the RELAXED memory model effectively. */
8505 fncode
= (enum built_in_function
)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
8507 cmpxchg
= builtin_decl_explicit (fncode
);
8508 if (cmpxchg
== NULL_TREE
)
8510 type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
8511 itype
= TREE_TYPE (TREE_TYPE (cmpxchg
));
8513 if (!can_compare_and_swap_p (TYPE_MODE (itype
), true))
8516 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
8517 si
= gsi_last_bb (load_bb
);
8518 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
8520 /* For floating-point values, we'll need to view-convert them to integers
8521 so that we can perform the atomic compare and swap. Simplify the
8522 following code by always setting up the "i"ntegral variables. */
8523 if (!INTEGRAL_TYPE_P (type
) && !POINTER_TYPE_P (type
))
8527 iaddr
= create_tmp_reg (build_pointer_type_for_mode (itype
, ptr_mode
,
8530 = force_gimple_operand_gsi (&si
,
8531 fold_convert (TREE_TYPE (iaddr
), addr
),
8532 false, NULL_TREE
, true, GSI_SAME_STMT
);
8533 stmt
= gimple_build_assign (iaddr
, iaddr_val
);
8534 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
8535 loadedi
= create_tmp_var (itype
);
8536 if (gimple_in_ssa_p (cfun
))
8537 loadedi
= make_ssa_name (loadedi
);
8542 loadedi
= loaded_val
;
8545 fncode
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
8546 tree loaddecl
= builtin_decl_explicit (fncode
);
8549 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr
)),
8550 build_call_expr (loaddecl
, 2, iaddr
,
8551 build_int_cst (NULL_TREE
,
8552 MEMMODEL_RELAXED
)));
8554 initial
= build2 (MEM_REF
, TREE_TYPE (TREE_TYPE (iaddr
)), iaddr
,
8555 build_int_cst (TREE_TYPE (iaddr
), 0));
8558 = force_gimple_operand_gsi (&si
, initial
, true, NULL_TREE
, true,
8561 /* Move the value to the LOADEDI temporary. */
8562 if (gimple_in_ssa_p (cfun
))
8564 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header
)));
8565 phi
= create_phi_node (loadedi
, loop_header
);
8566 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, single_succ_edge (load_bb
)),
8570 gsi_insert_before (&si
,
8571 gimple_build_assign (loadedi
, initial
),
8573 if (loadedi
!= loaded_val
)
8575 gimple_stmt_iterator gsi2
;
8578 x
= build1 (VIEW_CONVERT_EXPR
, type
, loadedi
);
8579 gsi2
= gsi_start_bb (loop_header
);
8580 if (gimple_in_ssa_p (cfun
))
8583 x
= force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
8584 true, GSI_SAME_STMT
);
8585 stmt
= gimple_build_assign (loaded_val
, x
);
8586 gsi_insert_before (&gsi2
, stmt
, GSI_SAME_STMT
);
8590 x
= build2 (MODIFY_EXPR
, TREE_TYPE (loaded_val
), loaded_val
, x
);
8591 force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
8592 true, GSI_SAME_STMT
);
8595 gsi_remove (&si
, true);
8597 si
= gsi_last_bb (store_bb
);
8598 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
8601 storedi
= stored_val
;
8604 force_gimple_operand_gsi (&si
,
8605 build1 (VIEW_CONVERT_EXPR
, itype
,
8606 stored_val
), true, NULL_TREE
, true,
8609 /* Build the compare&swap statement. */
8610 new_storedi
= build_call_expr (cmpxchg
, 3, iaddr
, loadedi
, storedi
);
8611 new_storedi
= force_gimple_operand_gsi (&si
,
8612 fold_convert (TREE_TYPE (loadedi
),
8615 true, GSI_SAME_STMT
);
8617 if (gimple_in_ssa_p (cfun
))
8621 old_vali
= create_tmp_var (TREE_TYPE (loadedi
));
8622 stmt
= gimple_build_assign (old_vali
, loadedi
);
8623 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
8625 stmt
= gimple_build_assign (loadedi
, new_storedi
);
8626 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
8629 /* Note that we always perform the comparison as an integer, even for
8630 floating point. This allows the atomic operation to properly
8631 succeed even with NaNs and -0.0. */
8632 stmt
= gimple_build_cond_empty
8633 (build2 (NE_EXPR
, boolean_type_node
,
8634 new_storedi
, old_vali
));
8635 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
8638 e
= single_succ_edge (store_bb
);
8639 e
->flags
&= ~EDGE_FALLTHRU
;
8640 e
->flags
|= EDGE_FALSE_VALUE
;
8642 e
= make_edge (store_bb
, loop_header
, EDGE_TRUE_VALUE
);
8644 /* Copy the new value to loadedi (we already did that before the condition
8645 if we are not in SSA). */
8646 if (gimple_in_ssa_p (cfun
))
8648 phi
= gimple_seq_first_stmt (phi_nodes (loop_header
));
8649 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), new_storedi
);
8652 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8653 gsi_remove (&si
, true);
8655 struct loop
*loop
= alloc_loop ();
8656 loop
->header
= loop_header
;
8657 loop
->latch
= store_bb
;
8658 add_loop (loop
, loop_header
->loop_father
);
8660 if (gimple_in_ssa_p (cfun
))
8661 update_ssa (TODO_update_ssa_no_phi
);
8666 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8668 GOMP_atomic_start ();
8672 The result is not globally atomic, but works so long as all parallel
8673 references are within #pragma omp atomic directives. According to
8674 responses received from omp@openmp.org, appears to be within spec.
8675 Which makes sense, since that's how several other compilers handle
8676 this situation as well.
8677 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8678 expanding. STORED_VAL is the operand of the matching
8679 GIMPLE_OMP_ATOMIC_STORE.
8682 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8686 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8691 expand_omp_atomic_mutex (basic_block load_bb
, basic_block store_bb
,
8692 tree addr
, tree loaded_val
, tree stored_val
)
8694 gimple_stmt_iterator si
;
8698 si
= gsi_last_bb (load_bb
);
8699 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
8701 t
= builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
);
8702 t
= build_call_expr (t
, 0);
8703 force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
8705 stmt
= gimple_build_assign (loaded_val
, build_simple_mem_ref (addr
));
8706 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
8707 gsi_remove (&si
, true);
8709 si
= gsi_last_bb (store_bb
);
8710 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
8712 stmt
= gimple_build_assign (build_simple_mem_ref (unshare_expr (addr
)),
8714 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
8716 t
= builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
);
8717 t
= build_call_expr (t
, 0);
8718 force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
8719 gsi_remove (&si
, true);
8721 if (gimple_in_ssa_p (cfun
))
8722 update_ssa (TODO_update_ssa_no_phi
);
8726 /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
8727 using expand_omp_atomic_fetch_op. If it failed, we try to
8728 call expand_omp_atomic_pipeline, and if it fails too, the
8729 ultimate fallback is wrapping the operation in a mutex
8730 (expand_omp_atomic_mutex). REGION is the atomic region built
8731 by build_omp_regions_1(). */
8734 expand_omp_atomic (struct omp_region
*region
)
8736 basic_block load_bb
= region
->entry
, store_bb
= region
->exit
;
8737 gomp_atomic_load
*load
= as_a
<gomp_atomic_load
*> (last_stmt (load_bb
));
8738 gomp_atomic_store
*store
= as_a
<gomp_atomic_store
*> (last_stmt (store_bb
));
8739 tree loaded_val
= gimple_omp_atomic_load_lhs (load
);
8740 tree addr
= gimple_omp_atomic_load_rhs (load
);
8741 tree stored_val
= gimple_omp_atomic_store_val (store
);
8742 tree type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
8743 HOST_WIDE_INT index
;
8745 /* Make sure the type is one of the supported sizes. */
8746 index
= tree_to_uhwi (TYPE_SIZE_UNIT (type
));
8747 index
= exact_log2 (index
);
8748 if (index
>= 0 && index
<= 4)
8750 unsigned int align
= TYPE_ALIGN_UNIT (type
);
8752 /* __sync builtins require strict data alignment. */
8753 if (exact_log2 (align
) >= index
)
8756 if (loaded_val
== stored_val
8757 && (GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_INT
8758 || GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
)
8759 && GET_MODE_BITSIZE (TYPE_MODE (type
)) <= BITS_PER_WORD
8760 && expand_omp_atomic_load (load_bb
, addr
, loaded_val
, index
))
8764 if ((GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_INT
8765 || GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
)
8766 && GET_MODE_BITSIZE (TYPE_MODE (type
)) <= BITS_PER_WORD
8767 && store_bb
== single_succ (load_bb
)
8768 && first_stmt (store_bb
) == store
8769 && expand_omp_atomic_store (load_bb
, addr
, loaded_val
,
8773 /* When possible, use specialized atomic update functions. */
8774 if ((INTEGRAL_TYPE_P (type
) || POINTER_TYPE_P (type
))
8775 && store_bb
== single_succ (load_bb
)
8776 && expand_omp_atomic_fetch_op (load_bb
, addr
,
8777 loaded_val
, stored_val
, index
))
8780 /* If we don't have specialized __sync builtins, try and implement
8781 as a compare and swap loop. */
8782 if (expand_omp_atomic_pipeline (load_bb
, store_bb
, addr
,
8783 loaded_val
, stored_val
, index
))
8788 /* The ultimate fallback is wrapping the operation in a mutex. */
8789 expand_omp_atomic_mutex (load_bb
, store_bb
, addr
, loaded_val
, stored_val
);
8793 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
8796 expand_omp_target (struct omp_region
*region
)
8798 basic_block entry_bb
, exit_bb
, new_bb
;
8799 struct function
*child_cfun
;
8800 tree child_fn
, block
, t
;
8801 gimple_stmt_iterator gsi
;
8802 gomp_target
*entry_stmt
;
8805 bool offloaded
, data_region
;
8807 entry_stmt
= as_a
<gomp_target
*> (last_stmt (region
->entry
));
8808 new_bb
= region
->entry
;
8810 offloaded
= is_gimple_omp_offloaded (entry_stmt
);
8811 switch (gimple_omp_target_kind (entry_stmt
))
8813 case GF_OMP_TARGET_KIND_REGION
:
8814 case GF_OMP_TARGET_KIND_UPDATE
:
8815 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
8816 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
8817 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
8818 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
8819 data_region
= false;
8821 case GF_OMP_TARGET_KIND_DATA
:
8822 case GF_OMP_TARGET_KIND_OACC_DATA
:
8829 child_fn
= NULL_TREE
;
8833 child_fn
= gimple_omp_target_child_fn (entry_stmt
);
8834 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
8837 /* Supported by expand_omp_taskreg, but not here. */
8838 if (child_cfun
!= NULL
)
8839 gcc_checking_assert (!child_cfun
->cfg
);
8840 gcc_checking_assert (!gimple_in_ssa_p (cfun
));
8842 entry_bb
= region
->entry
;
8843 exit_bb
= region
->exit
;
8847 unsigned srcidx
, dstidx
, num
;
8849 /* If the offloading region needs data sent from the parent
8850 function, then the very first statement (except possible
8851 tree profile counter updates) of the offloading body
8852 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8853 &.OMP_DATA_O is passed as an argument to the child function,
8854 we need to replace it with the argument as seen by the child
8857 In most cases, this will end up being the identity assignment
8858 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
8859 a function call that has been inlined, the original PARM_DECL
8860 .OMP_DATA_I may have been converted into a different local
8861 variable. In which case, we need to keep the assignment. */
8862 tree data_arg
= gimple_omp_target_data_arg (entry_stmt
);
8865 basic_block entry_succ_bb
= single_succ (entry_bb
);
8866 gimple_stmt_iterator gsi
;
8868 gimple tgtcopy_stmt
= NULL
;
8869 tree sender
= TREE_VEC_ELT (data_arg
, 0);
8871 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
8873 gcc_assert (!gsi_end_p (gsi
));
8874 stmt
= gsi_stmt (gsi
);
8875 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
8878 if (gimple_num_ops (stmt
) == 2)
8880 tree arg
= gimple_assign_rhs1 (stmt
);
8882 /* We're ignoring the subcode because we're
8883 effectively doing a STRIP_NOPS. */
8885 if (TREE_CODE (arg
) == ADDR_EXPR
8886 && TREE_OPERAND (arg
, 0) == sender
)
8888 tgtcopy_stmt
= stmt
;
8894 gcc_assert (tgtcopy_stmt
!= NULL
);
8895 arg
= DECL_ARGUMENTS (child_fn
);
8897 gcc_assert (gimple_assign_lhs (tgtcopy_stmt
) == arg
);
8898 gsi_remove (&gsi
, true);
8901 /* Declare local variables needed in CHILD_CFUN. */
8902 block
= DECL_INITIAL (child_fn
);
8903 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
8904 /* The gimplifier could record temporaries in the offloading block
8905 rather than in containing function's local_decls chain,
8906 which would mean cgraph missed finalizing them. Do it now. */
8907 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
8908 if (TREE_CODE (t
) == VAR_DECL
8910 && !DECL_EXTERNAL (t
))
8911 varpool_node::finalize_decl (t
);
8912 DECL_SAVED_TREE (child_fn
) = NULL
;
8913 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8914 gimple_set_body (child_fn
, NULL
);
8915 TREE_USED (block
) = 1;
8917 /* Reset DECL_CONTEXT on function arguments. */
8918 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
8919 DECL_CONTEXT (t
) = child_fn
;
8921 /* Split ENTRY_BB at GIMPLE_*,
8922 so that it can be moved to the child function. */
8923 gsi
= gsi_last_bb (entry_bb
);
8924 stmt
= gsi_stmt (gsi
);
8926 && gimple_code (stmt
) == gimple_code (entry_stmt
));
8927 e
= split_block (entry_bb
, stmt
);
8928 gsi_remove (&gsi
, true);
8930 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
8932 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8935 gsi
= gsi_last_bb (exit_bb
);
8936 gcc_assert (!gsi_end_p (gsi
)
8937 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
8938 stmt
= gimple_build_return (NULL
);
8939 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
8940 gsi_remove (&gsi
, true);
8943 /* Move the offloading region into CHILD_CFUN. */
8945 block
= gimple_block (entry_stmt
);
8947 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
8949 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
8950 /* When the OMP expansion process cannot guarantee an up-to-date
8951 loop tree arrange for the child function to fixup loops. */
8952 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP
))
8953 child_cfun
->x_current_loops
->state
|= LOOPS_NEED_FIXUP
;
8955 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8956 num
= vec_safe_length (child_cfun
->local_decls
);
8957 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
8959 t
= (*child_cfun
->local_decls
)[srcidx
];
8960 if (DECL_CONTEXT (t
) == cfun
->decl
)
8962 if (srcidx
!= dstidx
)
8963 (*child_cfun
->local_decls
)[dstidx
] = t
;
8967 vec_safe_truncate (child_cfun
->local_decls
, dstidx
);
8969 /* Inform the callgraph about the new function. */
8970 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
= cfun
->curr_properties
;
8971 cgraph_node::add_new_function (child_fn
, true);
8973 #ifdef ENABLE_OFFLOADING
8974 /* Add the new function to the offload table. */
8975 vec_safe_push (offload_funcs
, child_fn
);
8978 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8979 fixed in a following pass. */
8980 push_cfun (child_cfun
);
8981 cgraph_edge::rebuild_edges ();
8983 #ifdef ENABLE_OFFLOADING
8984 /* Prevent IPA from removing child_fn as unreachable, since there are no
8985 refs from the parent function to child_fn in offload LTO mode. */
8986 struct cgraph_node
*node
= cgraph_node::get (child_fn
);
8987 node
->mark_force_output ();
8990 /* Some EH regions might become dead, see PR34608. If
8991 pass_cleanup_cfg isn't the first pass to happen with the
8992 new child, these dead EH edges might cause problems.
8993 Clean them up now. */
8994 if (flag_exceptions
)
8997 bool changed
= false;
8999 FOR_EACH_BB_FN (bb
, cfun
)
9000 changed
|= gimple_purge_dead_eh_edges (bb
);
9002 cleanup_tree_cfg ();
9007 /* Emit a library call to launch the offloading region, or do data
9009 tree t1
, t2
, t3
, t4
, device
, cond
, c
, clauses
;
9010 enum built_in_function start_ix
;
9011 location_t clause_loc
;
9013 switch (gimple_omp_target_kind (entry_stmt
))
9015 case GF_OMP_TARGET_KIND_REGION
:
9016 start_ix
= BUILT_IN_GOMP_TARGET
;
9018 case GF_OMP_TARGET_KIND_DATA
:
9019 start_ix
= BUILT_IN_GOMP_TARGET_DATA
;
9021 case GF_OMP_TARGET_KIND_UPDATE
:
9022 start_ix
= BUILT_IN_GOMP_TARGET_UPDATE
;
9024 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
9025 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
9026 start_ix
= BUILT_IN_GOACC_PARALLEL
;
9028 case GF_OMP_TARGET_KIND_OACC_DATA
:
9029 start_ix
= BUILT_IN_GOACC_DATA_START
;
9031 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
9032 start_ix
= BUILT_IN_GOACC_UPDATE
;
9034 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
9035 start_ix
= BUILT_IN_GOACC_ENTER_EXIT_DATA
;
9041 clauses
= gimple_omp_target_clauses (entry_stmt
);
9043 /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
9044 library choose) and there is no conditional. */
9046 device
= build_int_cst (integer_type_node
, GOMP_DEVICE_ICV
);
9048 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
9050 cond
= OMP_CLAUSE_IF_EXPR (c
);
9052 c
= find_omp_clause (clauses
, OMP_CLAUSE_DEVICE
);
9055 /* Even if we pass it to all library function calls, it is currently only
9056 defined/used for the OpenMP target ones. */
9057 gcc_checking_assert (start_ix
== BUILT_IN_GOMP_TARGET
9058 || start_ix
== BUILT_IN_GOMP_TARGET_DATA
9059 || start_ix
== BUILT_IN_GOMP_TARGET_UPDATE
);
9061 device
= OMP_CLAUSE_DEVICE_ID (c
);
9062 clause_loc
= OMP_CLAUSE_LOCATION (c
);
9065 clause_loc
= gimple_location (entry_stmt
);
9067 /* Ensure 'device' is of the correct type. */
9068 device
= fold_convert_loc (clause_loc
, integer_type_node
, device
);
9070 /* If we found the clause 'if (cond)', build
9071 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
9074 cond
= gimple_boolify (cond
);
9076 basic_block cond_bb
, then_bb
, else_bb
;
9080 tmp_var
= create_tmp_var (TREE_TYPE (device
));
9082 e
= split_block_after_labels (new_bb
);
9085 gsi
= gsi_last_bb (new_bb
);
9087 e
= split_block (new_bb
, gsi_stmt (gsi
));
9093 then_bb
= create_empty_bb (cond_bb
);
9094 else_bb
= create_empty_bb (then_bb
);
9095 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
9096 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
9098 stmt
= gimple_build_cond_empty (cond
);
9099 gsi
= gsi_last_bb (cond_bb
);
9100 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
9102 gsi
= gsi_start_bb (then_bb
);
9103 stmt
= gimple_build_assign (tmp_var
, device
);
9104 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
9106 gsi
= gsi_start_bb (else_bb
);
9107 stmt
= gimple_build_assign (tmp_var
,
9108 build_int_cst (integer_type_node
,
9109 GOMP_DEVICE_HOST_FALLBACK
));
9110 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
9112 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
9113 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
9114 add_bb_to_loop (then_bb
, cond_bb
->loop_father
);
9115 add_bb_to_loop (else_bb
, cond_bb
->loop_father
);
9116 make_edge (then_bb
, new_bb
, EDGE_FALLTHRU
);
9117 make_edge (else_bb
, new_bb
, EDGE_FALLTHRU
);
9122 gsi
= gsi_last_bb (new_bb
);
9123 t
= gimple_omp_target_data_arg (entry_stmt
);
9126 t1
= size_zero_node
;
9127 t2
= build_zero_cst (ptr_type_node
);
9133 t1
= TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t
, 1))));
9134 t1
= size_binop (PLUS_EXPR
, t1
, size_int (1));
9135 t2
= build_fold_addr_expr (TREE_VEC_ELT (t
, 0));
9136 t3
= build_fold_addr_expr (TREE_VEC_ELT (t
, 1));
9137 t4
= build_fold_addr_expr (TREE_VEC_ELT (t
, 2));
9141 /* The maximum number used by any start_ix, without varargs. */
9142 auto_vec
<tree
, 11> args
;
9143 args
.quick_push (device
);
9145 args
.quick_push (build_fold_addr_expr (child_fn
));
9148 case BUILT_IN_GOMP_TARGET
:
9149 case BUILT_IN_GOMP_TARGET_DATA
:
9150 case BUILT_IN_GOMP_TARGET_UPDATE
:
9151 /* This const void * is part of the current ABI, but we're not actually
9153 args
.quick_push (build_zero_cst (ptr_type_node
));
9155 case BUILT_IN_GOACC_DATA_START
:
9156 case BUILT_IN_GOACC_ENTER_EXIT_DATA
:
9157 case BUILT_IN_GOACC_PARALLEL
:
9158 case BUILT_IN_GOACC_UPDATE
:
9163 args
.quick_push (t1
);
9164 args
.quick_push (t2
);
9165 args
.quick_push (t3
);
9166 args
.quick_push (t4
);
9169 case BUILT_IN_GOACC_DATA_START
:
9170 case BUILT_IN_GOMP_TARGET
:
9171 case BUILT_IN_GOMP_TARGET_DATA
:
9172 case BUILT_IN_GOMP_TARGET_UPDATE
:
9174 case BUILT_IN_GOACC_PARALLEL
:
9176 tree t_num_gangs
, t_num_workers
, t_vector_length
;
9178 /* Default values for num_gangs, num_workers, and vector_length. */
9179 t_num_gangs
= t_num_workers
= t_vector_length
9180 = fold_convert_loc (gimple_location (entry_stmt
),
9181 integer_type_node
, integer_one_node
);
9182 /* ..., but if present, use the value specified by the respective
9183 clause, making sure that are of the correct type. */
9184 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_GANGS
);
9186 t_num_gangs
= fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
9188 OMP_CLAUSE_NUM_GANGS_EXPR (c
));
9189 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_WORKERS
);
9191 t_num_workers
= fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
9193 OMP_CLAUSE_NUM_WORKERS_EXPR (c
));
9194 c
= find_omp_clause (clauses
, OMP_CLAUSE_VECTOR_LENGTH
);
9196 t_vector_length
= fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
9198 OMP_CLAUSE_VECTOR_LENGTH_EXPR (c
));
9199 args
.quick_push (t_num_gangs
);
9200 args
.quick_push (t_num_workers
);
9201 args
.quick_push (t_vector_length
);
9204 case BUILT_IN_GOACC_ENTER_EXIT_DATA
:
9205 case BUILT_IN_GOACC_UPDATE
:
9210 /* Default values for t_async. */
9211 t_async
= fold_convert_loc (gimple_location (entry_stmt
),
9213 build_int_cst (integer_type_node
,
9215 /* ..., but if present, use the value specified by the respective
9216 clause, making sure that is of the correct type. */
9217 c
= find_omp_clause (clauses
, OMP_CLAUSE_ASYNC
);
9219 t_async
= fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
9221 OMP_CLAUSE_ASYNC_EXPR (c
));
9223 args
.quick_push (t_async
);
9224 /* Save the index, and... */
9225 t_wait_idx
= args
.length ();
9226 /* ... push a default value. */
9227 args
.quick_push (fold_convert_loc (gimple_location (entry_stmt
),
9229 integer_zero_node
));
9230 c
= find_omp_clause (clauses
, OMP_CLAUSE_WAIT
);
9235 for (; c
; c
= OMP_CLAUSE_CHAIN (c
))
9237 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_WAIT
)
9239 args
.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
9241 OMP_CLAUSE_WAIT_EXPR (c
)));
9246 /* Now that we know the number, replace the default value. */
9247 args
.ordered_remove (t_wait_idx
);
9248 args
.quick_insert (t_wait_idx
,
9249 fold_convert_loc (gimple_location (entry_stmt
),
9251 build_int_cst (integer_type_node
, n
)));
9259 g
= gimple_build_call_vec (builtin_decl_explicit (start_ix
), args
);
9260 gimple_set_location (g
, gimple_location (entry_stmt
));
9261 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
9265 gcc_assert (g
&& gimple_code (g
) == GIMPLE_OMP_TARGET
);
9266 gsi_remove (&gsi
, true);
9271 gsi
= gsi_last_bb (region
->exit
);
9273 gcc_assert (g
&& gimple_code (g
) == GIMPLE_OMP_RETURN
);
9274 gsi_remove (&gsi
, true);
9279 /* Expand the parallel region tree rooted at REGION. Expansion
9280 proceeds in depth-first order. Innermost regions are expanded
9281 first. This way, parallel regions that require a new function to
9282 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
9283 internal dependencies in their body. */
9286 expand_omp (struct omp_region
*region
)
9290 location_t saved_location
;
9291 gimple inner_stmt
= NULL
;
9293 /* First, determine whether this is a combined parallel+workshare
9295 if (region
->type
== GIMPLE_OMP_PARALLEL
)
9296 determine_parallel_type (region
);
9298 if (region
->type
== GIMPLE_OMP_FOR
9299 && gimple_omp_for_combined_p (last_stmt (region
->entry
)))
9300 inner_stmt
= last_stmt (region
->inner
->entry
);
9303 expand_omp (region
->inner
);
9305 saved_location
= input_location
;
9306 if (gimple_has_location (last_stmt (region
->entry
)))
9307 input_location
= gimple_location (last_stmt (region
->entry
));
9309 switch (region
->type
)
9311 case GIMPLE_OMP_PARALLEL
:
9312 case GIMPLE_OMP_TASK
:
9313 expand_omp_taskreg (region
);
9316 case GIMPLE_OMP_FOR
:
9317 expand_omp_for (region
, inner_stmt
);
9320 case GIMPLE_OMP_SECTIONS
:
9321 expand_omp_sections (region
);
9324 case GIMPLE_OMP_SECTION
:
9325 /* Individual omp sections are handled together with their
9326 parent GIMPLE_OMP_SECTIONS region. */
9329 case GIMPLE_OMP_SINGLE
:
9330 expand_omp_single (region
);
9333 case GIMPLE_OMP_MASTER
:
9334 case GIMPLE_OMP_TASKGROUP
:
9335 case GIMPLE_OMP_ORDERED
:
9336 case GIMPLE_OMP_CRITICAL
:
9337 case GIMPLE_OMP_TEAMS
:
9338 expand_omp_synch (region
);
9341 case GIMPLE_OMP_ATOMIC_LOAD
:
9342 expand_omp_atomic (region
);
9345 case GIMPLE_OMP_TARGET
:
9346 expand_omp_target (region
);
9353 input_location
= saved_location
;
9354 region
= region
->next
;
9359 /* Helper for build_omp_regions. Scan the dominator tree starting at
9360 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
9361 true, the function ends once a single tree is built (otherwise, whole
9362 forest of OMP constructs may be built). */
9365 build_omp_regions_1 (basic_block bb
, struct omp_region
*parent
,
9368 gimple_stmt_iterator gsi
;
9372 gsi
= gsi_last_bb (bb
);
9373 if (!gsi_end_p (gsi
) && is_gimple_omp (gsi_stmt (gsi
)))
9375 struct omp_region
*region
;
9376 enum gimple_code code
;
9378 stmt
= gsi_stmt (gsi
);
9379 code
= gimple_code (stmt
);
9380 if (code
== GIMPLE_OMP_RETURN
)
9382 /* STMT is the return point out of region PARENT. Mark it
9383 as the exit point and make PARENT the immediately
9384 enclosing region. */
9385 gcc_assert (parent
);
9388 parent
= parent
->outer
;
9390 else if (code
== GIMPLE_OMP_ATOMIC_STORE
)
9392 /* GIMPLE_OMP_ATOMIC_STORE is analoguous to
9393 GIMPLE_OMP_RETURN, but matches with
9394 GIMPLE_OMP_ATOMIC_LOAD. */
9395 gcc_assert (parent
);
9396 gcc_assert (parent
->type
== GIMPLE_OMP_ATOMIC_LOAD
);
9399 parent
= parent
->outer
;
9401 else if (code
== GIMPLE_OMP_CONTINUE
)
9403 gcc_assert (parent
);
9406 else if (code
== GIMPLE_OMP_SECTIONS_SWITCH
)
9408 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
9409 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
9413 region
= new_omp_region (bb
, code
, parent
);
9415 if (code
== GIMPLE_OMP_TARGET
)
9417 switch (gimple_omp_target_kind (stmt
))
9419 case GF_OMP_TARGET_KIND_REGION
:
9420 case GF_OMP_TARGET_KIND_DATA
:
9421 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
9422 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
9423 case GF_OMP_TARGET_KIND_OACC_DATA
:
9425 case GF_OMP_TARGET_KIND_UPDATE
:
9426 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
9427 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
9428 /* ..., other than for those stand-alone directives... */
9435 /* ..., this directive becomes the parent for a new region. */
9441 if (single_tree
&& !parent
)
9444 for (son
= first_dom_son (CDI_DOMINATORS
, bb
);
9446 son
= next_dom_son (CDI_DOMINATORS
, son
))
9447 build_omp_regions_1 (son
, parent
, single_tree
);
9450 /* Builds the tree of OMP regions rooted at ROOT, storing it to
9454 build_omp_regions_root (basic_block root
)
9456 gcc_assert (root_omp_region
== NULL
);
9457 build_omp_regions_1 (root
, NULL
, true);
9458 gcc_assert (root_omp_region
!= NULL
);
9461 /* Expands omp construct (and its subconstructs) starting in HEAD. */
9464 omp_expand_local (basic_block head
)
9466 build_omp_regions_root (head
);
9467 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
9469 fprintf (dump_file
, "\nOMP region tree\n\n");
9470 dump_omp_region (dump_file
, root_omp_region
, 0);
9471 fprintf (dump_file
, "\n");
9474 remove_exit_barriers (root_omp_region
);
9475 expand_omp (root_omp_region
);
9477 free_omp_regions ();
9480 /* Scan the CFG and build a tree of OMP regions. Return the root of
9481 the OMP region tree. */
9484 build_omp_regions (void)
9486 gcc_assert (root_omp_region
== NULL
);
9487 calculate_dominance_info (CDI_DOMINATORS
);
9488 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun
), NULL
, false);
9491 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
9494 execute_expand_omp (void)
9496 build_omp_regions ();
9498 if (!root_omp_region
)
9503 fprintf (dump_file
, "\nOMP region tree\n\n");
9504 dump_omp_region (dump_file
, root_omp_region
, 0);
9505 fprintf (dump_file
, "\n");
9508 remove_exit_barriers (root_omp_region
);
9510 expand_omp (root_omp_region
);
9512 cleanup_tree_cfg ();
9514 free_omp_regions ();
9519 /* OMP expansion -- the default pass, run before creation of SSA form. */
9523 const pass_data pass_data_expand_omp
=
9525 GIMPLE_PASS
, /* type */
9526 "ompexp", /* name */
9527 OPTGROUP_NONE
, /* optinfo_flags */
9528 TV_NONE
, /* tv_id */
9529 PROP_gimple_any
, /* properties_required */
9530 PROP_gimple_eomp
, /* properties_provided */
9531 0, /* properties_destroyed */
9532 0, /* todo_flags_start */
9533 0, /* todo_flags_finish */
9536 class pass_expand_omp
: public gimple_opt_pass
9539 pass_expand_omp (gcc::context
*ctxt
)
9540 : gimple_opt_pass (pass_data_expand_omp
, ctxt
)
9543 /* opt_pass methods: */
9544 virtual unsigned int execute (function
*)
9546 bool gate
= ((flag_cilkplus
!= 0 || flag_openacc
!= 0 || flag_openmp
!= 0
9547 || flag_openmp_simd
!= 0)
9550 /* This pass always runs, to provide PROP_gimple_eomp.
9551 But often, there is nothing to do. */
9555 return execute_expand_omp ();
9558 }; // class pass_expand_omp
9563 make_pass_expand_omp (gcc::context
*ctxt
)
9565 return new pass_expand_omp (ctxt
);
9570 const pass_data pass_data_expand_omp_ssa
=
9572 GIMPLE_PASS
, /* type */
9573 "ompexpssa", /* name */
9574 OPTGROUP_NONE
, /* optinfo_flags */
9575 TV_NONE
, /* tv_id */
9576 PROP_cfg
| PROP_ssa
, /* properties_required */
9577 PROP_gimple_eomp
, /* properties_provided */
9578 0, /* properties_destroyed */
9579 0, /* todo_flags_start */
9580 TODO_cleanup_cfg
| TODO_rebuild_alias
, /* todo_flags_finish */
9583 class pass_expand_omp_ssa
: public gimple_opt_pass
9586 pass_expand_omp_ssa (gcc::context
*ctxt
)
9587 : gimple_opt_pass (pass_data_expand_omp_ssa
, ctxt
)
9590 /* opt_pass methods: */
9591 virtual bool gate (function
*fun
)
9593 return !(fun
->curr_properties
& PROP_gimple_eomp
);
9595 virtual unsigned int execute (function
*) { return execute_expand_omp (); }
9597 }; // class pass_expand_omp_ssa
9602 make_pass_expand_omp_ssa (gcc::context
*ctxt
)
9604 return new pass_expand_omp_ssa (ctxt
);
9607 /* Routines to lower OMP directives into OMP-GIMPLE. */
9609 /* Helper function to preform, potentially COMPLEX_TYPE, operation and
9610 convert it to gimple. */
9612 oacc_gimple_assign (tree dest
, tree_code op
, tree src
, gimple_seq
*seq
)
9616 if (TREE_CODE (TREE_TYPE (dest
)) != COMPLEX_TYPE
)
9618 stmt
= gimple_build_assign (dest
, op
, dest
, src
);
9619 gimple_seq_add_stmt (seq
, stmt
);
9623 tree t
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9624 tree rdest
= fold_build1 (REALPART_EXPR
, TREE_TYPE (TREE_TYPE (dest
)), dest
);
9625 gimplify_assign (t
, rdest
, seq
);
9628 t
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9629 tree idest
= fold_build1 (IMAGPART_EXPR
, TREE_TYPE (TREE_TYPE (dest
)), dest
);
9630 gimplify_assign (t
, idest
, seq
);
9633 t
= create_tmp_var (TREE_TYPE (TREE_TYPE (src
)));
9634 tree rsrc
= fold_build1 (REALPART_EXPR
, TREE_TYPE (TREE_TYPE (src
)), src
);
9635 gimplify_assign (t
, rsrc
, seq
);
9638 t
= create_tmp_var (TREE_TYPE (TREE_TYPE (src
)));
9639 tree isrc
= fold_build1 (IMAGPART_EXPR
, TREE_TYPE (TREE_TYPE (src
)), src
);
9640 gimplify_assign (t
, isrc
, seq
);
9643 tree r
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9644 tree i
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9647 if (op
== PLUS_EXPR
)
9649 stmt
= gimple_build_assign (r
, op
, rdest
, rsrc
);
9650 gimple_seq_add_stmt (seq
, stmt
);
9652 stmt
= gimple_build_assign (i
, op
, idest
, isrc
);
9653 gimple_seq_add_stmt (seq
, stmt
);
9655 else if (op
== MULT_EXPR
)
9657 /* Let x = a + ib = dest, y = c + id = src.
9658 x * y = (ac - bd) + i(ad + bc) */
9659 tree ac
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9660 tree bd
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9661 tree ad
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9662 tree bc
= create_tmp_var (TREE_TYPE (TREE_TYPE (dest
)));
9664 stmt
= gimple_build_assign (ac
, MULT_EXPR
, rdest
, rsrc
);
9665 gimple_seq_add_stmt (seq
, stmt
);
9667 stmt
= gimple_build_assign (bd
, MULT_EXPR
, idest
, isrc
);
9668 gimple_seq_add_stmt (seq
, stmt
);
9670 stmt
= gimple_build_assign (r
, MINUS_EXPR
, ac
, bd
);
9671 gimple_seq_add_stmt (seq
, stmt
);
9673 stmt
= gimple_build_assign (ad
, MULT_EXPR
, rdest
, isrc
);
9674 gimple_seq_add_stmt (seq
, stmt
);
9676 stmt
= gimple_build_assign (bd
, MULT_EXPR
, idest
, rsrc
);
9677 gimple_seq_add_stmt (seq
, stmt
);
9679 stmt
= gimple_build_assign (i
, PLUS_EXPR
, ad
, bc
);
9680 gimple_seq_add_stmt (seq
, stmt
);
9685 result
= build2 (COMPLEX_EXPR
, TREE_TYPE (dest
), r
, i
);
9686 gimplify_assign (dest
, result
, seq
);
9689 /* Helper function to initialize local data for the reduction arrays.
9690 The reduction arrays need to be placed inside the calling function
9691 for accelerators, or else the host won't be able to preform the final
9695 oacc_initialize_reduction_data (tree clauses
, tree nthreads
,
9696 gimple_seq
*stmt_seqp
, omp_context
*ctx
)
9702 /* Find the innermost OpenACC parallel context. */
9703 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TARGET
9704 && (gimple_omp_target_kind (ctx
->stmt
)
9705 == GF_OMP_TARGET_KIND_OACC_PARALLEL
))
9709 gcc_checking_assert (gimple_code (octx
->stmt
) == GIMPLE_OMP_TARGET
9710 && (gimple_omp_target_kind (octx
->stmt
)
9711 == GF_OMP_TARGET_KIND_OACC_PARALLEL
));
9713 /* Extract the clauses. */
9714 oc
= gimple_omp_target_clauses (octx
->stmt
);
9716 /* Find the last outer clause. */
9717 for (; oc
&& OMP_CLAUSE_CHAIN (oc
); oc
= OMP_CLAUSE_CHAIN (oc
))
9720 /* Allocate arrays for each reduction variable. */
9721 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9723 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
9726 tree var
= OMP_CLAUSE_DECL (c
);
9727 tree type
= get_base_type (var
);
9728 tree array
= lookup_oacc_reduction (oacc_get_reduction_array_id (var
),
9732 /* Calculate size of the reduction array. */
9733 t
= create_tmp_var (TREE_TYPE (nthreads
));
9734 stmt
= gimple_build_assign (t
, MULT_EXPR
, nthreads
,
9735 fold_convert (TREE_TYPE (nthreads
),
9736 TYPE_SIZE_UNIT (type
)));
9737 gimple_seq_add_stmt (stmt_seqp
, stmt
);
9739 size
= create_tmp_var (sizetype
);
9740 gimplify_assign (size
, fold_build1 (NOP_EXPR
, sizetype
, t
), stmt_seqp
);
9742 /* Now allocate memory for it. */
9743 call
= unshare_expr (builtin_decl_explicit (BUILT_IN_ALLOCA
));
9744 stmt
= gimple_build_call (call
, 1, size
);
9745 gimple_call_set_lhs (stmt
, array
);
9746 gimple_seq_add_stmt (stmt_seqp
, stmt
);
9748 /* Map this array into the accelerator. */
9750 /* Add the reduction array to the list of clauses. */
9752 t
= build_omp_clause (gimple_location (ctx
->stmt
), OMP_CLAUSE_MAP
);
9753 OMP_CLAUSE_SET_MAP_KIND (t
, GOMP_MAP_FORCE_FROM
);
9754 OMP_CLAUSE_DECL (t
) = x
;
9755 OMP_CLAUSE_CHAIN (t
) = NULL
;
9757 OMP_CLAUSE_CHAIN (oc
) = t
;
9759 gimple_omp_target_set_clauses (as_a
<gomp_target
*> (octx
->stmt
), t
);
9760 OMP_CLAUSE_SIZE (t
) = size
;
9765 /* Helper function to process the array of partial reductions. Nthreads
9766 indicates the number of threads. Unfortunately, GOACC_GET_NUM_THREADS
9767 cannot be used here, because nthreads on the host may be different than
9768 on the accelerator. */
9771 oacc_finalize_reduction_data (tree clauses
, tree nthreads
,
9772 gimple_seq
*stmt_seqp
, omp_context
*ctx
)
9774 tree c
, x
, var
, array
, loop_header
, loop_body
, loop_exit
, type
;
9779 let var = the original reduction variable
9780 let array = reduction variable array
9782 for (i = 0; i < nthreads; i++)
9786 loop_header
= create_artificial_label (UNKNOWN_LOCATION
);
9787 loop_body
= create_artificial_label (UNKNOWN_LOCATION
);
9788 loop_exit
= create_artificial_label (UNKNOWN_LOCATION
);
9790 /* Create and initialize an index variable. */
9791 tree ix
= create_tmp_var (sizetype
);
9792 gimplify_assign (ix
, fold_build1 (NOP_EXPR
, sizetype
, integer_zero_node
),
9795 /* Insert the loop header label here. */
9796 gimple_seq_add_stmt (stmt_seqp
, gimple_build_label (loop_header
));
9798 /* Exit loop if ix >= nthreads. */
9799 x
= create_tmp_var (sizetype
);
9800 gimplify_assign (x
, fold_build1 (NOP_EXPR
, sizetype
, nthreads
), stmt_seqp
);
9801 stmt
= gimple_build_cond (GE_EXPR
, ix
, x
, loop_exit
, loop_body
);
9802 gimple_seq_add_stmt (stmt_seqp
, stmt
);
9804 /* Insert the loop body label here. */
9805 gimple_seq_add_stmt (stmt_seqp
, gimple_build_label (loop_body
));
9807 /* Collapse each reduction array, one element at a time. */
9808 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9810 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
9813 tree_code reduction_code
= OMP_CLAUSE_REDUCTION_CODE (c
);
9815 /* reduction(-:var) sums up the partial results, so it acts
9816 identically to reduction(+:var). */
9817 if (reduction_code
== MINUS_EXPR
)
9818 reduction_code
= PLUS_EXPR
;
9820 /* Set up reduction variable var. */
9821 var
= OMP_CLAUSE_DECL (c
);
9822 type
= get_base_type (var
);
9823 array
= lookup_oacc_reduction (oacc_get_reduction_array_id
9824 (OMP_CLAUSE_DECL (c
)), ctx
);
9826 /* Calculate the array offset. */
9827 tree offset
= create_tmp_var (sizetype
);
9828 gimplify_assign (offset
, TYPE_SIZE_UNIT (type
), stmt_seqp
);
9829 stmt
= gimple_build_assign (offset
, MULT_EXPR
, offset
, ix
);
9830 gimple_seq_add_stmt (stmt_seqp
, stmt
);
9832 tree ptr
= create_tmp_var (TREE_TYPE (array
));
9833 stmt
= gimple_build_assign (ptr
, POINTER_PLUS_EXPR
, array
, offset
);
9834 gimple_seq_add_stmt (stmt_seqp
, stmt
);
9836 /* Extract array[ix] into mem. */
9837 tree mem
= create_tmp_var (type
);
9838 gimplify_assign (mem
, build_simple_mem_ref (ptr
), stmt_seqp
);
9840 /* Find the original reduction variable. */
9841 if (is_reference (var
))
9842 var
= build_simple_mem_ref (var
);
9844 tree t
= create_tmp_var (type
);
9846 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, t
, var
);
9847 gimplify_and_add (unshare_expr(x
), stmt_seqp
);
9849 /* var = var op mem */
9850 switch (OMP_CLAUSE_REDUCTION_CODE (c
))
9852 case TRUTH_ANDIF_EXPR
:
9853 case TRUTH_ORIF_EXPR
:
9854 t
= fold_build2 (OMP_CLAUSE_REDUCTION_CODE (c
), integer_type_node
,
9856 gimplify_and_add (t
, stmt_seqp
);
9859 /* The lhs isn't a gimple_reg when var is COMPLEX_TYPE. */
9860 oacc_gimple_assign (t
, OMP_CLAUSE_REDUCTION_CODE (c
), mem
,
9864 t
= fold_build1 (NOP_EXPR
, TREE_TYPE (var
), t
);
9865 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, var
, t
);
9866 gimplify_and_add (unshare_expr(x
), stmt_seqp
);
9869 /* Increment the induction variable. */
9870 tree one
= fold_build1 (NOP_EXPR
, sizetype
, integer_one_node
);
9871 stmt
= gimple_build_assign (ix
, PLUS_EXPR
, ix
, one
);
9872 gimple_seq_add_stmt (stmt_seqp
, stmt
);
9874 /* Go back to the top of the loop. */
9875 gimple_seq_add_stmt (stmt_seqp
, gimple_build_goto (loop_header
));
9877 /* Place the loop exit label here. */
9878 gimple_seq_add_stmt (stmt_seqp
, gimple_build_label (loop_exit
));
9881 /* Scan through all of the gimple stmts searching for an OMP_FOR_EXPR, and
9882 scan that for reductions. */
9885 oacc_process_reduction_data (gimple_seq
*body
, gimple_seq
*in_stmt_seqp
,
9886 gimple_seq
*out_stmt_seqp
, omp_context
*ctx
)
9888 gimple_stmt_iterator gsi
;
9889 gimple_seq inner
= NULL
;
9891 /* A collapse clause may have inserted a new bind block. */
9892 gsi
= gsi_start (*body
);
9893 while (!gsi_end_p (gsi
))
9895 gimple stmt
= gsi_stmt (gsi
);
9896 if (gbind
*bind_stmt
= dyn_cast
<gbind
*> (stmt
))
9898 inner
= gimple_bind_body (bind_stmt
);
9900 gsi
= gsi_start (*body
);
9902 else if (dyn_cast
<gomp_for
*> (stmt
))
9908 for (gsi
= gsi_start (*body
); !gsi_end_p (gsi
); gsi_next (&gsi
))
9910 tree clauses
, nthreads
, t
, c
, acc_device
, acc_device_host
, call
,
9912 bool reduction_found
= false;
9914 gimple stmt
= gsi_stmt (gsi
);
9916 switch (gimple_code (stmt
))
9918 case GIMPLE_OMP_FOR
:
9919 clauses
= gimple_omp_for_clauses (stmt
);
9921 /* Search for a reduction clause. */
9922 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9923 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
9925 reduction_found
= true;
9929 if (!reduction_found
)
9932 ctx
= maybe_lookup_ctx (stmt
);
9935 /* Extract the number of threads. */
9936 nthreads
= create_tmp_var (sizetype
);
9937 t
= oacc_max_threads (ctx
);
9938 gimplify_assign (nthreads
, t
, in_stmt_seqp
);
9940 /* Determine if this is kernel will be executed on the host. */
9941 call
= builtin_decl_explicit (BUILT_IN_ACC_GET_DEVICE_TYPE
);
9942 acc_device
= create_tmp_var (integer_type_node
, ".acc_device_type");
9943 stmt
= gimple_build_call (call
, 0);
9944 gimple_call_set_lhs (stmt
, acc_device
);
9945 gimple_seq_add_stmt (in_stmt_seqp
, stmt
);
9947 /* Set nthreads = 1 for ACC_DEVICE_TYPE=host. */
9948 acc_device_host
= create_tmp_var (integer_type_node
,
9949 ".acc_device_host");
9950 gimplify_assign (acc_device_host
,
9951 build_int_cst (integer_type_node
,
9955 enter
= create_artificial_label (UNKNOWN_LOCATION
);
9956 exit
= create_artificial_label (UNKNOWN_LOCATION
);
9958 stmt
= gimple_build_cond (EQ_EXPR
, acc_device
, acc_device_host
,
9960 gimple_seq_add_stmt (in_stmt_seqp
, stmt
);
9961 gimple_seq_add_stmt (in_stmt_seqp
, gimple_build_label (enter
));
9962 gimplify_assign (nthreads
, fold_build1 (NOP_EXPR
, sizetype
,
9965 gimple_seq_add_stmt (in_stmt_seqp
, gimple_build_label (exit
));
9967 /* Also, set nthreads = 1 for ACC_DEVICE_TYPE=host_nonshm. */
9968 gimplify_assign (acc_device_host
,
9969 build_int_cst (integer_type_node
,
9970 GOMP_DEVICE_HOST_NONSHM
),
9973 enter
= create_artificial_label (UNKNOWN_LOCATION
);
9974 exit
= create_artificial_label (UNKNOWN_LOCATION
);
9976 stmt
= gimple_build_cond (EQ_EXPR
, acc_device
, acc_device_host
,
9978 gimple_seq_add_stmt (in_stmt_seqp
, stmt
);
9979 gimple_seq_add_stmt (in_stmt_seqp
, gimple_build_label (enter
));
9980 gimplify_assign (nthreads
, fold_build1 (NOP_EXPR
, sizetype
,
9983 gimple_seq_add_stmt (in_stmt_seqp
, gimple_build_label (exit
));
9985 oacc_initialize_reduction_data (clauses
, nthreads
, in_stmt_seqp
,
9987 oacc_finalize_reduction_data (clauses
, nthreads
, out_stmt_seqp
, ctx
);
9990 // Scan for other directives which support reduction here.
9996 /* If ctx is a worksharing context inside of a cancellable parallel
9997 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
9998 and conditional branch to parallel's cancel_label to handle
9999 cancellation in the implicit barrier. */
10002 maybe_add_implicit_barrier_cancel (omp_context
*ctx
, gimple_seq
*body
)
10004 gimple omp_return
= gimple_seq_last_stmt (*body
);
10005 gcc_assert (gimple_code (omp_return
) == GIMPLE_OMP_RETURN
);
10006 if (gimple_omp_return_nowait_p (omp_return
))
10009 && gimple_code (ctx
->outer
->stmt
) == GIMPLE_OMP_PARALLEL
10010 && ctx
->outer
->cancellable
)
10012 tree fndecl
= builtin_decl_explicit (BUILT_IN_GOMP_CANCEL
);
10013 tree c_bool_type
= TREE_TYPE (TREE_TYPE (fndecl
));
10014 tree lhs
= create_tmp_var (c_bool_type
);
10015 gimple_omp_return_set_lhs (omp_return
, lhs
);
10016 tree fallthru_label
= create_artificial_label (UNKNOWN_LOCATION
);
10017 gimple g
= gimple_build_cond (NE_EXPR
, lhs
,
10018 fold_convert (c_bool_type
,
10019 boolean_false_node
),
10020 ctx
->outer
->cancel_label
, fallthru_label
);
10021 gimple_seq_add_stmt (body
, g
);
10022 gimple_seq_add_stmt (body
, gimple_build_label (fallthru_label
));
10026 /* Lower the OpenMP sections directive in the current statement in GSI_P.
10027 CTX is the enclosing OMP context for the current statement. */
10030 lower_omp_sections (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10032 tree block
, control
;
10033 gimple_stmt_iterator tgsi
;
10034 gomp_sections
*stmt
;
10036 gbind
*new_stmt
, *bind
;
10037 gimple_seq ilist
, dlist
, olist
, new_body
;
10039 stmt
= as_a
<gomp_sections
*> (gsi_stmt (*gsi_p
));
10041 push_gimplify_context ();
10045 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt
),
10046 &ilist
, &dlist
, ctx
, NULL
);
10048 new_body
= gimple_omp_body (stmt
);
10049 gimple_omp_set_body (stmt
, NULL
);
10050 tgsi
= gsi_start (new_body
);
10051 for (; !gsi_end_p (tgsi
); gsi_next (&tgsi
))
10056 sec_start
= gsi_stmt (tgsi
);
10057 sctx
= maybe_lookup_ctx (sec_start
);
10060 lower_omp (gimple_omp_body_ptr (sec_start
), sctx
);
10061 gsi_insert_seq_after (&tgsi
, gimple_omp_body (sec_start
),
10062 GSI_CONTINUE_LINKING
);
10063 gimple_omp_set_body (sec_start
, NULL
);
10065 if (gsi_one_before_end_p (tgsi
))
10067 gimple_seq l
= NULL
;
10068 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt
), NULL
,
10070 gsi_insert_seq_after (&tgsi
, l
, GSI_CONTINUE_LINKING
);
10071 gimple_omp_section_set_last (sec_start
);
10074 gsi_insert_after (&tgsi
, gimple_build_omp_return (false),
10075 GSI_CONTINUE_LINKING
);
10078 block
= make_node (BLOCK
);
10079 bind
= gimple_build_bind (NULL
, new_body
, block
);
10082 lower_reduction_clauses (gimple_omp_sections_clauses (stmt
), &olist
, ctx
);
10084 block
= make_node (BLOCK
);
10085 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
10086 gsi_replace (gsi_p
, new_stmt
, true);
10088 pop_gimplify_context (new_stmt
);
10089 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
10090 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
10091 if (BLOCK_VARS (block
))
10092 TREE_USED (block
) = 1;
10095 gimple_seq_add_seq (&new_body
, ilist
);
10096 gimple_seq_add_stmt (&new_body
, stmt
);
10097 gimple_seq_add_stmt (&new_body
, gimple_build_omp_sections_switch ());
10098 gimple_seq_add_stmt (&new_body
, bind
);
10100 control
= create_tmp_var (unsigned_type_node
, ".section");
10101 t
= gimple_build_omp_continue (control
, control
);
10102 gimple_omp_sections_set_control (stmt
, control
);
10103 gimple_seq_add_stmt (&new_body
, t
);
10105 gimple_seq_add_seq (&new_body
, olist
);
10106 if (ctx
->cancellable
)
10107 gimple_seq_add_stmt (&new_body
, gimple_build_label (ctx
->cancel_label
));
10108 gimple_seq_add_seq (&new_body
, dlist
);
10110 new_body
= maybe_catch_exception (new_body
);
10112 t
= gimple_build_omp_return
10113 (!!find_omp_clause (gimple_omp_sections_clauses (stmt
),
10114 OMP_CLAUSE_NOWAIT
));
10115 gimple_seq_add_stmt (&new_body
, t
);
10116 maybe_add_implicit_barrier_cancel (ctx
, &new_body
);
10118 gimple_bind_set_body (new_stmt
, new_body
);
10122 /* A subroutine of lower_omp_single. Expand the simple form of
10123 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
10125 if (GOMP_single_start ())
10127 [ GOMP_barrier (); ] -> unless 'nowait' is present.
10129 FIXME. It may be better to delay expanding the logic of this until
10130 pass_expand_omp. The expanded logic may make the job more difficult
10131 to a synchronization analysis pass. */
10134 lower_omp_single_simple (gomp_single
*single_stmt
, gimple_seq
*pre_p
)
10136 location_t loc
= gimple_location (single_stmt
);
10137 tree tlabel
= create_artificial_label (loc
);
10138 tree flabel
= create_artificial_label (loc
);
10142 decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START
);
10143 lhs
= create_tmp_var (TREE_TYPE (TREE_TYPE (decl
)));
10144 call
= gimple_build_call (decl
, 0);
10145 gimple_call_set_lhs (call
, lhs
);
10146 gimple_seq_add_stmt (pre_p
, call
);
10148 cond
= gimple_build_cond (EQ_EXPR
, lhs
,
10149 fold_convert_loc (loc
, TREE_TYPE (lhs
),
10150 boolean_true_node
),
10152 gimple_seq_add_stmt (pre_p
, cond
);
10153 gimple_seq_add_stmt (pre_p
, gimple_build_label (tlabel
));
10154 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
10155 gimple_seq_add_stmt (pre_p
, gimple_build_label (flabel
));
10159 /* A subroutine of lower_omp_single. Expand the simple form of
10160 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
10162 #pragma omp single copyprivate (a, b, c)
10164 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
10167 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
10173 GOMP_single_copy_end (©out);
10184 FIXME. It may be better to delay expanding the logic of this until
10185 pass_expand_omp. The expanded logic may make the job more difficult
10186 to a synchronization analysis pass. */
10189 lower_omp_single_copy (gomp_single
*single_stmt
, gimple_seq
*pre_p
,
10192 tree ptr_type
, t
, l0
, l1
, l2
, bfn_decl
;
10193 gimple_seq copyin_seq
;
10194 location_t loc
= gimple_location (single_stmt
);
10196 ctx
->sender_decl
= create_tmp_var (ctx
->record_type
, ".omp_copy_o");
10198 ptr_type
= build_pointer_type (ctx
->record_type
);
10199 ctx
->receiver_decl
= create_tmp_var (ptr_type
, ".omp_copy_i");
10201 l0
= create_artificial_label (loc
);
10202 l1
= create_artificial_label (loc
);
10203 l2
= create_artificial_label (loc
);
10205 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START
);
10206 t
= build_call_expr_loc (loc
, bfn_decl
, 0);
10207 t
= fold_convert_loc (loc
, ptr_type
, t
);
10208 gimplify_assign (ctx
->receiver_decl
, t
, pre_p
);
10210 t
= build2 (EQ_EXPR
, boolean_type_node
, ctx
->receiver_decl
,
10211 build_int_cst (ptr_type
, 0));
10212 t
= build3 (COND_EXPR
, void_type_node
, t
,
10213 build_and_jump (&l0
), build_and_jump (&l1
));
10214 gimplify_and_add (t
, pre_p
);
10216 gimple_seq_add_stmt (pre_p
, gimple_build_label (l0
));
10218 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
10221 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt
), pre_p
,
10224 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
10225 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END
);
10226 t
= build_call_expr_loc (loc
, bfn_decl
, 1, t
);
10227 gimplify_and_add (t
, pre_p
);
10229 t
= build_and_jump (&l2
);
10230 gimplify_and_add (t
, pre_p
);
10232 gimple_seq_add_stmt (pre_p
, gimple_build_label (l1
));
10234 gimple_seq_add_seq (pre_p
, copyin_seq
);
10236 gimple_seq_add_stmt (pre_p
, gimple_build_label (l2
));
10240 /* Expand code for an OpenMP single directive. */
10243 lower_omp_single (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10247 gomp_single
*single_stmt
= as_a
<gomp_single
*> (gsi_stmt (*gsi_p
));
10249 gimple_seq bind_body
, bind_body_tail
= NULL
, dlist
;
10251 push_gimplify_context ();
10253 block
= make_node (BLOCK
);
10254 bind
= gimple_build_bind (NULL
, NULL
, block
);
10255 gsi_replace (gsi_p
, bind
, true);
10258 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt
),
10259 &bind_body
, &dlist
, ctx
, NULL
);
10260 lower_omp (gimple_omp_body_ptr (single_stmt
), ctx
);
10262 gimple_seq_add_stmt (&bind_body
, single_stmt
);
10264 if (ctx
->record_type
)
10265 lower_omp_single_copy (single_stmt
, &bind_body
, ctx
);
10267 lower_omp_single_simple (single_stmt
, &bind_body
);
10269 gimple_omp_set_body (single_stmt
, NULL
);
10271 gimple_seq_add_seq (&bind_body
, dlist
);
10273 bind_body
= maybe_catch_exception (bind_body
);
10275 t
= gimple_build_omp_return
10276 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt
),
10277 OMP_CLAUSE_NOWAIT
));
10278 gimple_seq_add_stmt (&bind_body_tail
, t
);
10279 maybe_add_implicit_barrier_cancel (ctx
, &bind_body_tail
);
10280 if (ctx
->record_type
)
10282 gimple_stmt_iterator gsi
= gsi_start (bind_body_tail
);
10283 tree clobber
= build_constructor (ctx
->record_type
, NULL
);
10284 TREE_THIS_VOLATILE (clobber
) = 1;
10285 gsi_insert_after (&gsi
, gimple_build_assign (ctx
->sender_decl
,
10286 clobber
), GSI_SAME_STMT
);
10288 gimple_seq_add_seq (&bind_body
, bind_body_tail
);
10289 gimple_bind_set_body (bind
, bind_body
);
10291 pop_gimplify_context (bind
);
10293 gimple_bind_append_vars (bind
, ctx
->block_vars
);
10294 BLOCK_VARS (block
) = ctx
->block_vars
;
10295 if (BLOCK_VARS (block
))
10296 TREE_USED (block
) = 1;
10300 /* Expand code for an OpenMP master directive. */
10303 lower_omp_master (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10305 tree block
, lab
= NULL
, x
, bfn_decl
;
10306 gimple stmt
= gsi_stmt (*gsi_p
);
10308 location_t loc
= gimple_location (stmt
);
10311 push_gimplify_context ();
10313 block
= make_node (BLOCK
);
10314 bind
= gimple_build_bind (NULL
, NULL
, block
);
10315 gsi_replace (gsi_p
, bind
, true);
10316 gimple_bind_add_stmt (bind
, stmt
);
10318 bfn_decl
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
10319 x
= build_call_expr_loc (loc
, bfn_decl
, 0);
10320 x
= build2 (EQ_EXPR
, boolean_type_node
, x
, integer_zero_node
);
10321 x
= build3 (COND_EXPR
, void_type_node
, x
, NULL
, build_and_jump (&lab
));
10323 gimplify_and_add (x
, &tseq
);
10324 gimple_bind_add_seq (bind
, tseq
);
10326 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
10327 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
10328 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
10329 gimple_omp_set_body (stmt
, NULL
);
10331 gimple_bind_add_stmt (bind
, gimple_build_label (lab
));
10333 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
10335 pop_gimplify_context (bind
);
10337 gimple_bind_append_vars (bind
, ctx
->block_vars
);
10338 BLOCK_VARS (block
) = ctx
->block_vars
;
10342 /* Expand code for an OpenMP taskgroup directive. */
10345 lower_omp_taskgroup (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10347 gimple stmt
= gsi_stmt (*gsi_p
);
10350 tree block
= make_node (BLOCK
);
10352 bind
= gimple_build_bind (NULL
, NULL
, block
);
10353 gsi_replace (gsi_p
, bind
, true);
10354 gimple_bind_add_stmt (bind
, stmt
);
10356 x
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START
),
10358 gimple_bind_add_stmt (bind
, x
);
10360 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
10361 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
10362 gimple_omp_set_body (stmt
, NULL
);
10364 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
10366 gimple_bind_append_vars (bind
, ctx
->block_vars
);
10367 BLOCK_VARS (block
) = ctx
->block_vars
;
10371 /* Expand code for an OpenMP ordered directive. */
10374 lower_omp_ordered (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10377 gimple stmt
= gsi_stmt (*gsi_p
);
10381 push_gimplify_context ();
10383 block
= make_node (BLOCK
);
10384 bind
= gimple_build_bind (NULL
, NULL
, block
);
10385 gsi_replace (gsi_p
, bind
, true);
10386 gimple_bind_add_stmt (bind
, stmt
);
10388 x
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START
),
10390 gimple_bind_add_stmt (bind
, x
);
10392 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
10393 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
10394 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
10395 gimple_omp_set_body (stmt
, NULL
);
10397 x
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END
), 0);
10398 gimple_bind_add_stmt (bind
, x
);
10400 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
10402 pop_gimplify_context (bind
);
10404 gimple_bind_append_vars (bind
, ctx
->block_vars
);
10405 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
10409 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
10410 substitution of a couple of function calls. But in the NAMED case,
10411 requires that languages coordinate a symbol name. It is therefore
10412 best put here in common code. */
10414 static GTY(()) hash_map
<tree
, tree
> *critical_name_mutexes
;
10417 lower_omp_critical (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10420 tree name
, lock
, unlock
;
10421 gomp_critical
*stmt
= as_a
<gomp_critical
*> (gsi_stmt (*gsi_p
));
10423 location_t loc
= gimple_location (stmt
);
10426 name
= gimple_omp_critical_name (stmt
);
10431 if (!critical_name_mutexes
)
10432 critical_name_mutexes
= hash_map
<tree
, tree
>::create_ggc (10);
10434 tree
*n
= critical_name_mutexes
->get (name
);
10439 decl
= create_tmp_var_raw (ptr_type_node
);
10441 new_str
= ACONCAT ((".gomp_critical_user_",
10442 IDENTIFIER_POINTER (name
), NULL
));
10443 DECL_NAME (decl
) = get_identifier (new_str
);
10444 TREE_PUBLIC (decl
) = 1;
10445 TREE_STATIC (decl
) = 1;
10446 DECL_COMMON (decl
) = 1;
10447 DECL_ARTIFICIAL (decl
) = 1;
10448 DECL_IGNORED_P (decl
) = 1;
10450 varpool_node::finalize_decl (decl
);
10452 critical_name_mutexes
->put (name
, decl
);
10457 /* If '#pragma omp critical' is inside offloaded region or
10458 inside function marked as offloadable, the symbol must be
10459 marked as offloadable too. */
10461 if (cgraph_node::get (current_function_decl
)->offloadable
)
10462 varpool_node::get_create (decl
)->offloadable
= 1;
10464 for (octx
= ctx
->outer
; octx
; octx
= octx
->outer
)
10465 if (is_gimple_omp_offloaded (octx
->stmt
))
10467 varpool_node::get_create (decl
)->offloadable
= 1;
10471 lock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START
);
10472 lock
= build_call_expr_loc (loc
, lock
, 1, build_fold_addr_expr_loc (loc
, decl
));
10474 unlock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END
);
10475 unlock
= build_call_expr_loc (loc
, unlock
, 1,
10476 build_fold_addr_expr_loc (loc
, decl
));
10480 lock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START
);
10481 lock
= build_call_expr_loc (loc
, lock
, 0);
10483 unlock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END
);
10484 unlock
= build_call_expr_loc (loc
, unlock
, 0);
10487 push_gimplify_context ();
10489 block
= make_node (BLOCK
);
10490 bind
= gimple_build_bind (NULL
, NULL
, block
);
10491 gsi_replace (gsi_p
, bind
, true);
10492 gimple_bind_add_stmt (bind
, stmt
);
10494 tbody
= gimple_bind_body (bind
);
10495 gimplify_and_add (lock
, &tbody
);
10496 gimple_bind_set_body (bind
, tbody
);
10498 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
10499 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
10500 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
10501 gimple_omp_set_body (stmt
, NULL
);
10503 tbody
= gimple_bind_body (bind
);
10504 gimplify_and_add (unlock
, &tbody
);
10505 gimple_bind_set_body (bind
, tbody
);
10507 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
10509 pop_gimplify_context (bind
);
10510 gimple_bind_append_vars (bind
, ctx
->block_vars
);
10511 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
10515 /* A subroutine of lower_omp_for. Generate code to emit the predicate
10516 for a lastprivate clause. Given a loop control predicate of (V
10517 cond N2), we gate the clause on (!(V cond N2)). The lowered form
10518 is appended to *DLIST, iterator initialization is appended to
10522 lower_omp_for_lastprivate (struct omp_for_data
*fd
, gimple_seq
*body_p
,
10523 gimple_seq
*dlist
, struct omp_context
*ctx
)
10525 tree clauses
, cond
, vinit
;
10526 enum tree_code cond_code
;
10529 cond_code
= fd
->loop
.cond_code
;
10530 cond_code
= cond_code
== LT_EXPR
? GE_EXPR
: LE_EXPR
;
10532 /* When possible, use a strict equality expression. This can let VRP
10533 type optimizations deduce the value and remove a copy. */
10534 if (tree_fits_shwi_p (fd
->loop
.step
))
10536 HOST_WIDE_INT step
= tree_to_shwi (fd
->loop
.step
);
10537 if (step
== 1 || step
== -1)
10538 cond_code
= EQ_EXPR
;
10541 tree n2
= fd
->loop
.n2
;
10542 if (fd
->collapse
> 1
10543 && TREE_CODE (n2
) != INTEGER_CST
10544 && gimple_omp_for_combined_into_p (fd
->for_stmt
)
10545 && gimple_code (ctx
->outer
->stmt
) == GIMPLE_OMP_FOR
)
10547 gomp_for
*gfor
= as_a
<gomp_for
*> (ctx
->outer
->stmt
);
10548 if (gimple_omp_for_kind (gfor
) == GF_OMP_FOR_KIND_FOR
)
10550 struct omp_for_data outer_fd
;
10551 extract_omp_for_data (gfor
, &outer_fd
, NULL
);
10552 n2
= fold_convert (TREE_TYPE (n2
), outer_fd
.loop
.n2
);
10555 cond
= build2 (cond_code
, boolean_type_node
, fd
->loop
.v
, n2
);
10557 clauses
= gimple_omp_for_clauses (fd
->for_stmt
);
10559 lower_lastprivate_clauses (clauses
, cond
, &stmts
, ctx
);
10560 if (!gimple_seq_empty_p (stmts
))
10562 gimple_seq_add_seq (&stmts
, *dlist
);
10565 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
10566 vinit
= fd
->loop
.n1
;
10567 if (cond_code
== EQ_EXPR
10568 && tree_fits_shwi_p (fd
->loop
.n2
)
10569 && ! integer_zerop (fd
->loop
.n2
))
10570 vinit
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 0);
10572 vinit
= unshare_expr (vinit
);
10574 /* Initialize the iterator variable, so that threads that don't execute
10575 any iterations don't execute the lastprivate clauses by accident. */
10576 gimplify_assign (fd
->loop
.v
, vinit
, body_p
);
10581 /* Lower code for an OMP loop directive. */
10584 lower_omp_for (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10586 tree
*rhs_p
, block
;
10587 struct omp_for_data fd
, *fdp
= NULL
;
10588 gomp_for
*stmt
= as_a
<gomp_for
*> (gsi_stmt (*gsi_p
));
10590 gimple_seq omp_for_body
, body
, dlist
;
10593 push_gimplify_context ();
10595 lower_omp (gimple_omp_for_pre_body_ptr (stmt
), ctx
);
10597 block
= make_node (BLOCK
);
10598 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
10599 /* Replace at gsi right away, so that 'stmt' is no member
10600 of a sequence anymore as we're going to add to to a different
10602 gsi_replace (gsi_p
, new_stmt
, true);
10604 /* Move declaration of temporaries in the loop body before we make
10606 omp_for_body
= gimple_omp_body (stmt
);
10607 if (!gimple_seq_empty_p (omp_for_body
)
10608 && gimple_code (gimple_seq_first_stmt (omp_for_body
)) == GIMPLE_BIND
)
10611 = as_a
<gbind
*> (gimple_seq_first_stmt (omp_for_body
));
10612 tree vars
= gimple_bind_vars (inner_bind
);
10613 gimple_bind_append_vars (new_stmt
, vars
);
10614 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
10615 keep them on the inner_bind and it's block. */
10616 gimple_bind_set_vars (inner_bind
, NULL_TREE
);
10617 if (gimple_bind_block (inner_bind
))
10618 BLOCK_VARS (gimple_bind_block (inner_bind
)) = NULL_TREE
;
10621 if (gimple_omp_for_combined_into_p (stmt
))
10623 extract_omp_for_data (stmt
, &fd
, NULL
);
10626 /* We need two temporaries with fd.loop.v type (istart/iend)
10627 and then (fd.collapse - 1) temporaries with the same
10628 type for count2 ... countN-1 vars if not constant. */
10630 tree type
= fd
.iter_type
;
10631 if (fd
.collapse
> 1
10632 && TREE_CODE (fd
.loop
.n2
) != INTEGER_CST
)
10633 count
+= fd
.collapse
- 1;
10634 bool parallel_for
= gimple_omp_for_kind (stmt
) == GF_OMP_FOR_KIND_FOR
;
10635 tree outerc
= NULL
, *pc
= gimple_omp_for_clauses_ptr (stmt
);
10636 tree clauses
= *pc
;
10639 = find_omp_clause (gimple_omp_parallel_clauses (ctx
->outer
->stmt
),
10640 OMP_CLAUSE__LOOPTEMP_
);
10641 for (i
= 0; i
< count
; i
++)
10646 gcc_assert (outerc
);
10647 temp
= lookup_decl (OMP_CLAUSE_DECL (outerc
), ctx
->outer
);
10648 outerc
= find_omp_clause (OMP_CLAUSE_CHAIN (outerc
),
10649 OMP_CLAUSE__LOOPTEMP_
);
10653 temp
= create_tmp_var (type
);
10654 insert_decl_map (&ctx
->outer
->cb
, temp
, temp
);
10656 *pc
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE__LOOPTEMP_
);
10657 OMP_CLAUSE_DECL (*pc
) = temp
;
10658 pc
= &OMP_CLAUSE_CHAIN (*pc
);
10663 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
10666 lower_rec_input_clauses (gimple_omp_for_clauses (stmt
), &body
, &dlist
, ctx
,
10668 gimple_seq_add_seq (&body
, gimple_omp_for_pre_body (stmt
));
10670 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
10672 /* Lower the header expressions. At this point, we can assume that
10673 the header is of the form:
10675 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
10677 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
10678 using the .omp_data_s mapping, if needed. */
10679 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
10681 rhs_p
= gimple_omp_for_initial_ptr (stmt
, i
);
10682 if (!is_gimple_min_invariant (*rhs_p
))
10683 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
10685 rhs_p
= gimple_omp_for_final_ptr (stmt
, i
);
10686 if (!is_gimple_min_invariant (*rhs_p
))
10687 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
10689 rhs_p
= &TREE_OPERAND (gimple_omp_for_incr (stmt
, i
), 1);
10690 if (!is_gimple_min_invariant (*rhs_p
))
10691 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
10694 /* Once lowered, extract the bounds and clauses. */
10695 extract_omp_for_data (stmt
, &fd
, NULL
);
10697 lower_omp_for_lastprivate (&fd
, &body
, &dlist
, ctx
);
10699 gimple_seq_add_stmt (&body
, stmt
);
10700 gimple_seq_add_seq (&body
, gimple_omp_body (stmt
));
10702 gimple_seq_add_stmt (&body
, gimple_build_omp_continue (fd
.loop
.v
,
10705 /* After the loop, add exit clauses. */
10706 lower_reduction_clauses (gimple_omp_for_clauses (stmt
), &body
, ctx
);
10708 if (ctx
->cancellable
)
10709 gimple_seq_add_stmt (&body
, gimple_build_label (ctx
->cancel_label
));
10711 gimple_seq_add_seq (&body
, dlist
);
10713 body
= maybe_catch_exception (body
);
10715 /* Region exit marker goes at the end of the loop body. */
10716 gimple_seq_add_stmt (&body
, gimple_build_omp_return (fd
.have_nowait
));
10717 maybe_add_implicit_barrier_cancel (ctx
, &body
);
10718 pop_gimplify_context (new_stmt
);
10720 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
10721 BLOCK_VARS (block
) = gimple_bind_vars (new_stmt
);
10722 if (BLOCK_VARS (block
))
10723 TREE_USED (block
) = 1;
10725 gimple_bind_set_body (new_stmt
, body
);
10726 gimple_omp_set_body (stmt
, NULL
);
10727 gimple_omp_for_set_pre_body (stmt
, NULL
);
10730 /* Callback for walk_stmts. Check if the current statement only contains
10731 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
10734 check_combined_parallel (gimple_stmt_iterator
*gsi_p
,
10735 bool *handled_ops_p
,
10736 struct walk_stmt_info
*wi
)
10738 int *info
= (int *) wi
->info
;
10739 gimple stmt
= gsi_stmt (*gsi_p
);
10741 *handled_ops_p
= true;
10742 switch (gimple_code (stmt
))
10746 case GIMPLE_OMP_FOR
:
10747 case GIMPLE_OMP_SECTIONS
:
10748 *info
= *info
== 0 ? 1 : -1;
10757 struct omp_taskcopy_context
10759 /* This field must be at the beginning, as we do "inheritance": Some
10760 callback functions for tree-inline.c (e.g., omp_copy_decl)
10761 receive a copy_body_data pointer that is up-casted to an
10762 omp_context pointer. */
10768 task_copyfn_copy_decl (tree var
, copy_body_data
*cb
)
10770 struct omp_taskcopy_context
*tcctx
= (struct omp_taskcopy_context
*) cb
;
10772 if (splay_tree_lookup (tcctx
->ctx
->sfield_map
, (splay_tree_key
) var
))
10773 return create_tmp_var (TREE_TYPE (var
));
10779 task_copyfn_remap_type (struct omp_taskcopy_context
*tcctx
, tree orig_type
)
10781 tree name
, new_fields
= NULL
, type
, f
;
10783 type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
10784 name
= DECL_NAME (TYPE_NAME (orig_type
));
10785 name
= build_decl (gimple_location (tcctx
->ctx
->stmt
),
10786 TYPE_DECL
, name
, type
);
10787 TYPE_NAME (type
) = name
;
10789 for (f
= TYPE_FIELDS (orig_type
); f
; f
= TREE_CHAIN (f
))
10791 tree new_f
= copy_node (f
);
10792 DECL_CONTEXT (new_f
) = type
;
10793 TREE_TYPE (new_f
) = remap_type (TREE_TYPE (f
), &tcctx
->cb
);
10794 TREE_CHAIN (new_f
) = new_fields
;
10795 walk_tree (&DECL_SIZE (new_f
), copy_tree_body_r
, &tcctx
->cb
, NULL
);
10796 walk_tree (&DECL_SIZE_UNIT (new_f
), copy_tree_body_r
, &tcctx
->cb
, NULL
);
10797 walk_tree (&DECL_FIELD_OFFSET (new_f
), copy_tree_body_r
,
10799 new_fields
= new_f
;
10800 tcctx
->cb
.decl_map
->put (f
, new_f
);
10802 TYPE_FIELDS (type
) = nreverse (new_fields
);
10803 layout_type (type
);
10807 /* Create task copyfn. */
10810 create_task_copyfn (gomp_task
*task_stmt
, omp_context
*ctx
)
10812 struct function
*child_cfun
;
10813 tree child_fn
, t
, c
, src
, dst
, f
, sf
, arg
, sarg
, decl
;
10814 tree record_type
, srecord_type
, bind
, list
;
10815 bool record_needs_remap
= false, srecord_needs_remap
= false;
10817 struct omp_taskcopy_context tcctx
;
10818 location_t loc
= gimple_location (task_stmt
);
10820 child_fn
= gimple_omp_task_copy_fn (task_stmt
);
10821 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
10822 gcc_assert (child_cfun
->cfg
== NULL
);
10823 DECL_SAVED_TREE (child_fn
) = alloc_stmt_list ();
10825 /* Reset DECL_CONTEXT on function arguments. */
10826 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
10827 DECL_CONTEXT (t
) = child_fn
;
10829 /* Populate the function. */
10830 push_gimplify_context ();
10831 push_cfun (child_cfun
);
10833 bind
= build3 (BIND_EXPR
, void_type_node
, NULL
, NULL
, NULL
);
10834 TREE_SIDE_EFFECTS (bind
) = 1;
10836 DECL_SAVED_TREE (child_fn
) = bind
;
10837 DECL_SOURCE_LOCATION (child_fn
) = gimple_location (task_stmt
);
10839 /* Remap src and dst argument types if needed. */
10840 record_type
= ctx
->record_type
;
10841 srecord_type
= ctx
->srecord_type
;
10842 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
10843 if (variably_modified_type_p (TREE_TYPE (f
), ctx
->cb
.src_fn
))
10845 record_needs_remap
= true;
10848 for (f
= TYPE_FIELDS (srecord_type
); f
; f
= DECL_CHAIN (f
))
10849 if (variably_modified_type_p (TREE_TYPE (f
), ctx
->cb
.src_fn
))
10851 srecord_needs_remap
= true;
10855 if (record_needs_remap
|| srecord_needs_remap
)
10857 memset (&tcctx
, '\0', sizeof (tcctx
));
10858 tcctx
.cb
.src_fn
= ctx
->cb
.src_fn
;
10859 tcctx
.cb
.dst_fn
= child_fn
;
10860 tcctx
.cb
.src_node
= cgraph_node::get (tcctx
.cb
.src_fn
);
10861 gcc_checking_assert (tcctx
.cb
.src_node
);
10862 tcctx
.cb
.dst_node
= tcctx
.cb
.src_node
;
10863 tcctx
.cb
.src_cfun
= ctx
->cb
.src_cfun
;
10864 tcctx
.cb
.copy_decl
= task_copyfn_copy_decl
;
10865 tcctx
.cb
.eh_lp_nr
= 0;
10866 tcctx
.cb
.transform_call_graph_edges
= CB_CGE_MOVE
;
10867 tcctx
.cb
.decl_map
= new hash_map
<tree
, tree
>;
10870 if (record_needs_remap
)
10871 record_type
= task_copyfn_remap_type (&tcctx
, record_type
);
10872 if (srecord_needs_remap
)
10873 srecord_type
= task_copyfn_remap_type (&tcctx
, srecord_type
);
10876 tcctx
.cb
.decl_map
= NULL
;
10878 arg
= DECL_ARGUMENTS (child_fn
);
10879 TREE_TYPE (arg
) = build_pointer_type (record_type
);
10880 sarg
= DECL_CHAIN (arg
);
10881 TREE_TYPE (sarg
) = build_pointer_type (srecord_type
);
10883 /* First pass: initialize temporaries used in record_type and srecord_type
10884 sizes and field offsets. */
10885 if (tcctx
.cb
.decl_map
)
10886 for (c
= gimple_omp_task_clauses (task_stmt
); c
; c
= OMP_CLAUSE_CHAIN (c
))
10887 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_FIRSTPRIVATE
)
10891 decl
= OMP_CLAUSE_DECL (c
);
10892 p
= tcctx
.cb
.decl_map
->get (decl
);
10895 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
10896 sf
= (tree
) n
->value
;
10897 sf
= *tcctx
.cb
.decl_map
->get (sf
);
10898 src
= build_simple_mem_ref_loc (loc
, sarg
);
10899 src
= omp_build_component_ref (src
, sf
);
10900 t
= build2 (MODIFY_EXPR
, TREE_TYPE (*p
), *p
, src
);
10901 append_to_statement_list (t
, &list
);
10904 /* Second pass: copy shared var pointers and copy construct non-VLA
10905 firstprivate vars. */
10906 for (c
= gimple_omp_task_clauses (task_stmt
); c
; c
= OMP_CLAUSE_CHAIN (c
))
10907 switch (OMP_CLAUSE_CODE (c
))
10909 case OMP_CLAUSE_SHARED
:
10910 decl
= OMP_CLAUSE_DECL (c
);
10911 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
10914 f
= (tree
) n
->value
;
10915 if (tcctx
.cb
.decl_map
)
10916 f
= *tcctx
.cb
.decl_map
->get (f
);
10917 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
10918 sf
= (tree
) n
->value
;
10919 if (tcctx
.cb
.decl_map
)
10920 sf
= *tcctx
.cb
.decl_map
->get (sf
);
10921 src
= build_simple_mem_ref_loc (loc
, sarg
);
10922 src
= omp_build_component_ref (src
, sf
);
10923 dst
= build_simple_mem_ref_loc (loc
, arg
);
10924 dst
= omp_build_component_ref (dst
, f
);
10925 t
= build2 (MODIFY_EXPR
, TREE_TYPE (dst
), dst
, src
);
10926 append_to_statement_list (t
, &list
);
10928 case OMP_CLAUSE_FIRSTPRIVATE
:
10929 decl
= OMP_CLAUSE_DECL (c
);
10930 if (is_variable_sized (decl
))
10932 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
10935 f
= (tree
) n
->value
;
10936 if (tcctx
.cb
.decl_map
)
10937 f
= *tcctx
.cb
.decl_map
->get (f
);
10938 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
10941 sf
= (tree
) n
->value
;
10942 if (tcctx
.cb
.decl_map
)
10943 sf
= *tcctx
.cb
.decl_map
->get (sf
);
10944 src
= build_simple_mem_ref_loc (loc
, sarg
);
10945 src
= omp_build_component_ref (src
, sf
);
10946 if (use_pointer_for_field (decl
, NULL
) || is_reference (decl
))
10947 src
= build_simple_mem_ref_loc (loc
, src
);
10951 dst
= build_simple_mem_ref_loc (loc
, arg
);
10952 dst
= omp_build_component_ref (dst
, f
);
10953 t
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, dst
, src
);
10954 append_to_statement_list (t
, &list
);
10956 case OMP_CLAUSE_PRIVATE
:
10957 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
10959 decl
= OMP_CLAUSE_DECL (c
);
10960 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
10961 f
= (tree
) n
->value
;
10962 if (tcctx
.cb
.decl_map
)
10963 f
= *tcctx
.cb
.decl_map
->get (f
);
10964 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
10967 sf
= (tree
) n
->value
;
10968 if (tcctx
.cb
.decl_map
)
10969 sf
= *tcctx
.cb
.decl_map
->get (sf
);
10970 src
= build_simple_mem_ref_loc (loc
, sarg
);
10971 src
= omp_build_component_ref (src
, sf
);
10972 if (use_pointer_for_field (decl
, NULL
))
10973 src
= build_simple_mem_ref_loc (loc
, src
);
10977 dst
= build_simple_mem_ref_loc (loc
, arg
);
10978 dst
= omp_build_component_ref (dst
, f
);
10979 t
= build2 (MODIFY_EXPR
, TREE_TYPE (dst
), dst
, src
);
10980 append_to_statement_list (t
, &list
);
10986 /* Last pass: handle VLA firstprivates. */
10987 if (tcctx
.cb
.decl_map
)
10988 for (c
= gimple_omp_task_clauses (task_stmt
); c
; c
= OMP_CLAUSE_CHAIN (c
))
10989 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_FIRSTPRIVATE
)
10993 decl
= OMP_CLAUSE_DECL (c
);
10994 if (!is_variable_sized (decl
))
10996 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
10999 f
= (tree
) n
->value
;
11000 f
= *tcctx
.cb
.decl_map
->get (f
);
11001 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl
));
11002 ind
= DECL_VALUE_EXPR (decl
);
11003 gcc_assert (TREE_CODE (ind
) == INDIRECT_REF
);
11004 gcc_assert (DECL_P (TREE_OPERAND (ind
, 0)));
11005 n
= splay_tree_lookup (ctx
->sfield_map
,
11006 (splay_tree_key
) TREE_OPERAND (ind
, 0));
11007 sf
= (tree
) n
->value
;
11008 sf
= *tcctx
.cb
.decl_map
->get (sf
);
11009 src
= build_simple_mem_ref_loc (loc
, sarg
);
11010 src
= omp_build_component_ref (src
, sf
);
11011 src
= build_simple_mem_ref_loc (loc
, src
);
11012 dst
= build_simple_mem_ref_loc (loc
, arg
);
11013 dst
= omp_build_component_ref (dst
, f
);
11014 t
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, dst
, src
);
11015 append_to_statement_list (t
, &list
);
11016 n
= splay_tree_lookup (ctx
->field_map
,
11017 (splay_tree_key
) TREE_OPERAND (ind
, 0));
11018 df
= (tree
) n
->value
;
11019 df
= *tcctx
.cb
.decl_map
->get (df
);
11020 ptr
= build_simple_mem_ref_loc (loc
, arg
);
11021 ptr
= omp_build_component_ref (ptr
, df
);
11022 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ptr
), ptr
,
11023 build_fold_addr_expr_loc (loc
, dst
));
11024 append_to_statement_list (t
, &list
);
11027 t
= build1 (RETURN_EXPR
, void_type_node
, NULL
);
11028 append_to_statement_list (t
, &list
);
11030 if (tcctx
.cb
.decl_map
)
11031 delete tcctx
.cb
.decl_map
;
11032 pop_gimplify_context (NULL
);
11033 BIND_EXPR_BODY (bind
) = list
;
11038 lower_depend_clauses (gimple stmt
, gimple_seq
*iseq
, gimple_seq
*oseq
)
11042 size_t n_in
= 0, n_out
= 0, idx
= 2, i
;
11044 clauses
= find_omp_clause (gimple_omp_task_clauses (stmt
),
11045 OMP_CLAUSE_DEPEND
);
11046 gcc_assert (clauses
);
11047 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
11048 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_DEPEND
)
11049 switch (OMP_CLAUSE_DEPEND_KIND (c
))
11051 case OMP_CLAUSE_DEPEND_IN
:
11054 case OMP_CLAUSE_DEPEND_OUT
:
11055 case OMP_CLAUSE_DEPEND_INOUT
:
11059 gcc_unreachable ();
11061 tree type
= build_array_type_nelts (ptr_type_node
, n_in
+ n_out
+ 2);
11062 tree array
= create_tmp_var (type
);
11063 tree r
= build4 (ARRAY_REF
, ptr_type_node
, array
, size_int (0), NULL_TREE
,
11065 g
= gimple_build_assign (r
, build_int_cst (ptr_type_node
, n_in
+ n_out
));
11066 gimple_seq_add_stmt (iseq
, g
);
11067 r
= build4 (ARRAY_REF
, ptr_type_node
, array
, size_int (1), NULL_TREE
,
11069 g
= gimple_build_assign (r
, build_int_cst (ptr_type_node
, n_out
));
11070 gimple_seq_add_stmt (iseq
, g
);
11071 for (i
= 0; i
< 2; i
++)
11073 if ((i
? n_in
: n_out
) == 0)
11075 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
11076 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_DEPEND
11077 && ((OMP_CLAUSE_DEPEND_KIND (c
) != OMP_CLAUSE_DEPEND_IN
) ^ i
))
11079 tree t
= OMP_CLAUSE_DECL (c
);
11080 t
= fold_convert (ptr_type_node
, t
);
11081 gimplify_expr (&t
, iseq
, NULL
, is_gimple_val
, fb_rvalue
);
11082 r
= build4 (ARRAY_REF
, ptr_type_node
, array
, size_int (idx
++),
11083 NULL_TREE
, NULL_TREE
);
11084 g
= gimple_build_assign (r
, t
);
11085 gimple_seq_add_stmt (iseq
, g
);
11088 tree
*p
= gimple_omp_task_clauses_ptr (stmt
);
11089 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE_DEPEND
);
11090 OMP_CLAUSE_DECL (c
) = build_fold_addr_expr (array
);
11091 OMP_CLAUSE_CHAIN (c
) = *p
;
11093 tree clobber
= build_constructor (type
, NULL
);
11094 TREE_THIS_VOLATILE (clobber
) = 1;
11095 g
= gimple_build_assign (array
, clobber
);
11096 gimple_seq_add_stmt (oseq
, g
);
11099 /* Lower the OpenMP parallel or task directive in the current statement
11100 in GSI_P. CTX holds context information for the directive. */
11103 lower_omp_taskreg (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
11107 gimple stmt
= gsi_stmt (*gsi_p
);
11108 gbind
*par_bind
, *bind
, *dep_bind
= NULL
;
11109 gimple_seq par_body
, olist
, ilist
, par_olist
, par_rlist
, par_ilist
, new_body
;
11110 location_t loc
= gimple_location (stmt
);
11112 clauses
= gimple_omp_taskreg_clauses (stmt
);
11114 = as_a
<gbind
*> (gimple_seq_first_stmt (gimple_omp_body (stmt
)));
11115 par_body
= gimple_bind_body (par_bind
);
11116 child_fn
= ctx
->cb
.dst_fn
;
11117 if (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
11118 && !gimple_omp_parallel_combined_p (stmt
))
11120 struct walk_stmt_info wi
;
11123 memset (&wi
, 0, sizeof (wi
));
11125 wi
.val_only
= true;
11126 walk_gimple_seq (par_body
, check_combined_parallel
, NULL
, &wi
);
11128 gimple_omp_parallel_set_combined_p (stmt
, true);
11130 gimple_seq dep_ilist
= NULL
;
11131 gimple_seq dep_olist
= NULL
;
11132 if (gimple_code (stmt
) == GIMPLE_OMP_TASK
11133 && find_omp_clause (clauses
, OMP_CLAUSE_DEPEND
))
11135 push_gimplify_context ();
11136 dep_bind
= gimple_build_bind (NULL
, NULL
, make_node (BLOCK
));
11137 lower_depend_clauses (stmt
, &dep_ilist
, &dep_olist
);
11140 if (ctx
->srecord_type
)
11141 create_task_copyfn (as_a
<gomp_task
*> (stmt
), ctx
);
11143 push_gimplify_context ();
11148 lower_rec_input_clauses (clauses
, &par_ilist
, &par_olist
, ctx
, NULL
);
11149 lower_omp (&par_body
, ctx
);
11150 if (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
)
11151 lower_reduction_clauses (clauses
, &par_rlist
, ctx
);
11153 /* Declare all the variables created by mapping and the variables
11154 declared in the scope of the parallel body. */
11155 record_vars_into (ctx
->block_vars
, child_fn
);
11156 record_vars_into (gimple_bind_vars (par_bind
), child_fn
);
11158 if (ctx
->record_type
)
11161 = create_tmp_var (ctx
->srecord_type
? ctx
->srecord_type
11162 : ctx
->record_type
, ".omp_data_o");
11163 DECL_NAMELESS (ctx
->sender_decl
) = 1;
11164 TREE_ADDRESSABLE (ctx
->sender_decl
) = 1;
11165 gimple_omp_taskreg_set_data_arg (stmt
, ctx
->sender_decl
);
11170 lower_send_clauses (clauses
, &ilist
, &olist
, ctx
);
11171 lower_send_shared_vars (&ilist
, &olist
, ctx
);
11173 if (ctx
->record_type
)
11175 tree clobber
= build_constructor (TREE_TYPE (ctx
->sender_decl
), NULL
);
11176 TREE_THIS_VOLATILE (clobber
) = 1;
11177 gimple_seq_add_stmt (&olist
, gimple_build_assign (ctx
->sender_decl
,
11181 /* Once all the expansions are done, sequence all the different
11182 fragments inside gimple_omp_body. */
11186 if (ctx
->record_type
)
11188 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
11189 /* fixup_child_record_type might have changed receiver_decl's type. */
11190 t
= fold_convert_loc (loc
, TREE_TYPE (ctx
->receiver_decl
), t
);
11191 gimple_seq_add_stmt (&new_body
,
11192 gimple_build_assign (ctx
->receiver_decl
, t
));
11195 gimple_seq_add_seq (&new_body
, par_ilist
);
11196 gimple_seq_add_seq (&new_body
, par_body
);
11197 gimple_seq_add_seq (&new_body
, par_rlist
);
11198 if (ctx
->cancellable
)
11199 gimple_seq_add_stmt (&new_body
, gimple_build_label (ctx
->cancel_label
));
11200 gimple_seq_add_seq (&new_body
, par_olist
);
11201 new_body
= maybe_catch_exception (new_body
);
11202 if (gimple_code (stmt
) == GIMPLE_OMP_TASK
)
11203 gimple_seq_add_stmt (&new_body
,
11204 gimple_build_omp_continue (integer_zero_node
,
11205 integer_zero_node
));
11206 gimple_seq_add_stmt (&new_body
, gimple_build_omp_return (false));
11207 gimple_omp_set_body (stmt
, new_body
);
11209 bind
= gimple_build_bind (NULL
, NULL
, gimple_bind_block (par_bind
));
11210 gsi_replace (gsi_p
, dep_bind
? dep_bind
: bind
, true);
11211 gimple_bind_add_seq (bind
, ilist
);
11212 gimple_bind_add_stmt (bind
, stmt
);
11213 gimple_bind_add_seq (bind
, olist
);
11215 pop_gimplify_context (NULL
);
11219 gimple_bind_add_seq (dep_bind
, dep_ilist
);
11220 gimple_bind_add_stmt (dep_bind
, bind
);
11221 gimple_bind_add_seq (dep_bind
, dep_olist
);
11222 pop_gimplify_context (dep_bind
);
11226 /* Lower the GIMPLE_OMP_TARGET in the current statement
11227 in GSI_P. CTX holds context information for the directive. */
11230 lower_omp_target (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
11233 tree child_fn
, t
, c
;
11234 gomp_target
*stmt
= as_a
<gomp_target
*> (gsi_stmt (*gsi_p
));
11235 gbind
*tgt_bind
, *bind
;
11236 gimple_seq tgt_body
, olist
, ilist
, orlist
, irlist
, new_body
;
11237 location_t loc
= gimple_location (stmt
);
11238 bool offloaded
, data_region
;
11239 unsigned int map_cnt
= 0;
11241 offloaded
= is_gimple_omp_offloaded (stmt
);
11242 switch (gimple_omp_target_kind (stmt
))
11244 case GF_OMP_TARGET_KIND_REGION
:
11245 case GF_OMP_TARGET_KIND_UPDATE
:
11246 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
11247 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
11248 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
11249 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
11250 data_region
= false;
11252 case GF_OMP_TARGET_KIND_DATA
:
11253 case GF_OMP_TARGET_KIND_OACC_DATA
:
11254 data_region
= true;
11257 gcc_unreachable ();
11260 clauses
= gimple_omp_target_clauses (stmt
);
11266 tgt_bind
= gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt
));
11267 tgt_body
= gimple_bind_body (tgt_bind
);
11269 else if (data_region
)
11270 tgt_body
= gimple_omp_body (stmt
);
11271 child_fn
= ctx
->cb
.dst_fn
;
11273 push_gimplify_context ();
11278 && is_gimple_omp_oacc (stmt
))
11279 oacc_process_reduction_data (&tgt_body
, &irlist
, &orlist
, ctx
);
11281 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
11282 switch (OMP_CLAUSE_CODE (c
))
11288 case OMP_CLAUSE_MAP
:
11289 #ifdef ENABLE_CHECKING
11290 /* First check what we're prepared to handle in the following. */
11291 switch (OMP_CLAUSE_MAP_KIND (c
))
11293 case GOMP_MAP_ALLOC
:
11295 case GOMP_MAP_FROM
:
11296 case GOMP_MAP_TOFROM
:
11297 case GOMP_MAP_POINTER
:
11298 case GOMP_MAP_TO_PSET
:
11300 case GOMP_MAP_FORCE_ALLOC
:
11301 case GOMP_MAP_FORCE_TO
:
11302 case GOMP_MAP_FORCE_FROM
:
11303 case GOMP_MAP_FORCE_TOFROM
:
11304 case GOMP_MAP_FORCE_PRESENT
:
11305 case GOMP_MAP_FORCE_DEALLOC
:
11306 case GOMP_MAP_FORCE_DEVICEPTR
:
11307 gcc_assert (is_gimple_omp_oacc (stmt
));
11310 gcc_unreachable ();
11314 case OMP_CLAUSE_TO
:
11315 case OMP_CLAUSE_FROM
:
11316 var
= OMP_CLAUSE_DECL (c
);
11319 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_MAP
11320 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
))
11325 if (DECL_SIZE (var
)
11326 && TREE_CODE (DECL_SIZE (var
)) != INTEGER_CST
)
11328 tree var2
= DECL_VALUE_EXPR (var
);
11329 gcc_assert (TREE_CODE (var2
) == INDIRECT_REF
);
11330 var2
= TREE_OPERAND (var2
, 0);
11331 gcc_assert (DECL_P (var2
));
11335 if (!maybe_lookup_field (var
, ctx
))
11340 x
= build_receiver_ref (var
, true, ctx
);
11341 tree new_var
= lookup_decl (var
, ctx
);
11342 if (OMP_CLAUSE_MAP_KIND (c
) == GOMP_MAP_POINTER
11343 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
)
11344 && TREE_CODE (TREE_TYPE (var
)) == ARRAY_TYPE
)
11345 x
= build_simple_mem_ref (x
);
11346 SET_DECL_VALUE_EXPR (new_var
, x
);
11347 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
11354 target_nesting_level
++;
11355 lower_omp (&tgt_body
, ctx
);
11356 target_nesting_level
--;
11358 else if (data_region
)
11359 lower_omp (&tgt_body
, ctx
);
11363 /* Declare all the variables created by mapping and the variables
11364 declared in the scope of the target body. */
11365 record_vars_into (ctx
->block_vars
, child_fn
);
11366 record_vars_into (gimple_bind_vars (tgt_bind
), child_fn
);
11371 if (ctx
->record_type
)
11374 = create_tmp_var (ctx
->record_type
, ".omp_data_arr");
11375 DECL_NAMELESS (ctx
->sender_decl
) = 1;
11376 TREE_ADDRESSABLE (ctx
->sender_decl
) = 1;
11377 t
= make_tree_vec (3);
11378 TREE_VEC_ELT (t
, 0) = ctx
->sender_decl
;
11379 TREE_VEC_ELT (t
, 1)
11380 = create_tmp_var (build_array_type_nelts (size_type_node
, map_cnt
),
11381 ".omp_data_sizes");
11382 DECL_NAMELESS (TREE_VEC_ELT (t
, 1)) = 1;
11383 TREE_ADDRESSABLE (TREE_VEC_ELT (t
, 1)) = 1;
11384 TREE_STATIC (TREE_VEC_ELT (t
, 1)) = 1;
11387 if (is_gimple_omp_oacc (stmt
))
11389 tkind_type
= short_unsigned_type_node
;
11394 tkind_type
= unsigned_char_type_node
;
11397 TREE_VEC_ELT (t
, 2)
11398 = create_tmp_var (build_array_type_nelts (tkind_type
, map_cnt
),
11399 ".omp_data_kinds");
11400 DECL_NAMELESS (TREE_VEC_ELT (t
, 2)) = 1;
11401 TREE_ADDRESSABLE (TREE_VEC_ELT (t
, 2)) = 1;
11402 TREE_STATIC (TREE_VEC_ELT (t
, 2)) = 1;
11403 gimple_omp_target_set_data_arg (stmt
, t
);
11405 vec
<constructor_elt
, va_gc
> *vsize
;
11406 vec
<constructor_elt
, va_gc
> *vkind
;
11407 vec_alloc (vsize
, map_cnt
);
11408 vec_alloc (vkind
, map_cnt
);
11409 unsigned int map_idx
= 0;
11411 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
11412 switch (OMP_CLAUSE_CODE (c
))
11418 case OMP_CLAUSE_MAP
:
11419 case OMP_CLAUSE_TO
:
11420 case OMP_CLAUSE_FROM
:
11422 ovar
= OMP_CLAUSE_DECL (c
);
11423 if (!DECL_P (ovar
))
11425 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
11426 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
))
11428 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c
))
11429 == get_base_address (ovar
));
11430 nc
= OMP_CLAUSE_CHAIN (c
);
11431 ovar
= OMP_CLAUSE_DECL (nc
);
11435 tree x
= build_sender_ref (ovar
, ctx
);
11437 = build_fold_addr_expr_with_type (ovar
, ptr_type_node
);
11438 gimplify_assign (x
, v
, &ilist
);
11444 if (DECL_SIZE (ovar
)
11445 && TREE_CODE (DECL_SIZE (ovar
)) != INTEGER_CST
)
11447 tree ovar2
= DECL_VALUE_EXPR (ovar
);
11448 gcc_assert (TREE_CODE (ovar2
) == INDIRECT_REF
);
11449 ovar2
= TREE_OPERAND (ovar2
, 0);
11450 gcc_assert (DECL_P (ovar2
));
11453 if (!maybe_lookup_field (ovar
, ctx
))
11457 unsigned int talign
= TYPE_ALIGN_UNIT (TREE_TYPE (ovar
));
11458 if (DECL_P (ovar
) && DECL_ALIGN_UNIT (ovar
) > talign
)
11459 talign
= DECL_ALIGN_UNIT (ovar
);
11462 tree var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
11463 tree x
= build_sender_ref (ovar
, ctx
);
11464 if (maybe_lookup_oacc_reduction (var
, ctx
))
11466 gcc_checking_assert (offloaded
11467 && is_gimple_omp_oacc (stmt
));
11468 gimplify_assign (x
, var
, &ilist
);
11470 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
11471 && OMP_CLAUSE_MAP_KIND (c
) == GOMP_MAP_POINTER
11472 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
)
11473 && TREE_CODE (TREE_TYPE (ovar
)) == ARRAY_TYPE
)
11475 gcc_assert (offloaded
);
11477 = create_tmp_var (TREE_TYPE (TREE_TYPE (x
)));
11478 mark_addressable (avar
);
11479 gimplify_assign (avar
, build_fold_addr_expr (var
), &ilist
);
11480 talign
= DECL_ALIGN_UNIT (avar
);
11481 avar
= build_fold_addr_expr (avar
);
11482 gimplify_assign (x
, avar
, &ilist
);
11484 else if (is_gimple_reg (var
))
11486 gcc_assert (offloaded
);
11487 tree avar
= create_tmp_var (TREE_TYPE (var
));
11488 mark_addressable (avar
);
11489 enum gomp_map_kind map_kind
= OMP_CLAUSE_MAP_KIND (c
);
11490 if (GOMP_MAP_COPY_TO_P (map_kind
)
11491 || map_kind
== GOMP_MAP_POINTER
11492 || map_kind
== GOMP_MAP_TO_PSET
11493 || map_kind
== GOMP_MAP_FORCE_DEVICEPTR
)
11494 gimplify_assign (avar
, var
, &ilist
);
11495 avar
= build_fold_addr_expr (avar
);
11496 gimplify_assign (x
, avar
, &ilist
);
11497 if ((GOMP_MAP_COPY_FROM_P (map_kind
)
11498 || map_kind
== GOMP_MAP_FORCE_DEVICEPTR
)
11499 && !TYPE_READONLY (TREE_TYPE (var
)))
11501 x
= build_sender_ref (ovar
, ctx
);
11502 x
= build_simple_mem_ref (x
);
11503 gimplify_assign (var
, x
, &olist
);
11508 var
= build_fold_addr_expr (var
);
11509 gimplify_assign (x
, var
, &ilist
);
11512 tree s
= OMP_CLAUSE_SIZE (c
);
11513 if (s
== NULL_TREE
)
11514 s
= TYPE_SIZE_UNIT (TREE_TYPE (ovar
));
11515 s
= fold_convert (size_type_node
, s
);
11516 tree purpose
= size_int (map_idx
++);
11517 CONSTRUCTOR_APPEND_ELT (vsize
, purpose
, s
);
11518 if (TREE_CODE (s
) != INTEGER_CST
)
11519 TREE_STATIC (TREE_VEC_ELT (t
, 1)) = 0;
11521 unsigned HOST_WIDE_INT tkind
;
11522 switch (OMP_CLAUSE_CODE (c
))
11524 case OMP_CLAUSE_MAP
:
11525 tkind
= OMP_CLAUSE_MAP_KIND (c
);
11527 case OMP_CLAUSE_TO
:
11528 tkind
= GOMP_MAP_TO
;
11530 case OMP_CLAUSE_FROM
:
11531 tkind
= GOMP_MAP_FROM
;
11534 gcc_unreachable ();
11536 gcc_checking_assert (tkind
11537 < (HOST_WIDE_INT_C (1U) << talign_shift
));
11538 talign
= ceil_log2 (talign
);
11539 tkind
|= talign
<< talign_shift
;
11540 gcc_checking_assert (tkind
11541 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type
)));
11542 CONSTRUCTOR_APPEND_ELT (vkind
, purpose
,
11543 build_int_cstu (tkind_type
, tkind
));
11548 gcc_assert (map_idx
== map_cnt
);
11550 DECL_INITIAL (TREE_VEC_ELT (t
, 1))
11551 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t
, 1)), vsize
);
11552 DECL_INITIAL (TREE_VEC_ELT (t
, 2))
11553 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t
, 2)), vkind
);
11554 if (!TREE_STATIC (TREE_VEC_ELT (t
, 1)))
11556 gimple_seq initlist
= NULL
;
11557 force_gimple_operand (build1 (DECL_EXPR
, void_type_node
,
11558 TREE_VEC_ELT (t
, 1)),
11559 &initlist
, true, NULL_TREE
);
11560 gimple_seq_add_seq (&ilist
, initlist
);
11562 tree clobber
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t
, 1)),
11564 TREE_THIS_VOLATILE (clobber
) = 1;
11565 gimple_seq_add_stmt (&olist
,
11566 gimple_build_assign (TREE_VEC_ELT (t
, 1),
11570 tree clobber
= build_constructor (ctx
->record_type
, NULL
);
11571 TREE_THIS_VOLATILE (clobber
) = 1;
11572 gimple_seq_add_stmt (&olist
, gimple_build_assign (ctx
->sender_decl
,
11576 /* Once all the expansions are done, sequence all the different
11577 fragments inside gimple_omp_body. */
11582 && ctx
->record_type
)
11584 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
11585 /* fixup_child_record_type might have changed receiver_decl's type. */
11586 t
= fold_convert_loc (loc
, TREE_TYPE (ctx
->receiver_decl
), t
);
11587 gimple_seq_add_stmt (&new_body
,
11588 gimple_build_assign (ctx
->receiver_decl
, t
));
11593 gimple_seq_add_seq (&new_body
, tgt_body
);
11594 new_body
= maybe_catch_exception (new_body
);
11596 else if (data_region
)
11597 new_body
= tgt_body
;
11598 if (offloaded
|| data_region
)
11600 gimple_seq_add_stmt (&new_body
, gimple_build_omp_return (false));
11601 gimple_omp_set_body (stmt
, new_body
);
11604 bind
= gimple_build_bind (NULL
, NULL
,
11605 tgt_bind
? gimple_bind_block (tgt_bind
)
11607 gsi_replace (gsi_p
, bind
, true);
11608 gimple_bind_add_seq (bind
, irlist
);
11609 gimple_bind_add_seq (bind
, ilist
);
11610 gimple_bind_add_stmt (bind
, stmt
);
11611 gimple_bind_add_seq (bind
, olist
);
11612 gimple_bind_add_seq (bind
, orlist
);
11614 pop_gimplify_context (NULL
);
11617 /* Expand code for an OpenMP teams directive. */
11620 lower_omp_teams (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
11622 gomp_teams
*teams_stmt
= as_a
<gomp_teams
*> (gsi_stmt (*gsi_p
));
11623 push_gimplify_context ();
11625 tree block
= make_node (BLOCK
);
11626 gbind
*bind
= gimple_build_bind (NULL
, NULL
, block
);
11627 gsi_replace (gsi_p
, bind
, true);
11628 gimple_seq bind_body
= NULL
;
11629 gimple_seq dlist
= NULL
;
11630 gimple_seq olist
= NULL
;
11632 tree num_teams
= find_omp_clause (gimple_omp_teams_clauses (teams_stmt
),
11633 OMP_CLAUSE_NUM_TEAMS
);
11634 if (num_teams
== NULL_TREE
)
11635 num_teams
= build_int_cst (unsigned_type_node
, 0);
11638 num_teams
= OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams
);
11639 num_teams
= fold_convert (unsigned_type_node
, num_teams
);
11640 gimplify_expr (&num_teams
, &bind_body
, NULL
, is_gimple_val
, fb_rvalue
);
11642 tree thread_limit
= find_omp_clause (gimple_omp_teams_clauses (teams_stmt
),
11643 OMP_CLAUSE_THREAD_LIMIT
);
11644 if (thread_limit
== NULL_TREE
)
11645 thread_limit
= build_int_cst (unsigned_type_node
, 0);
11648 thread_limit
= OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit
);
11649 thread_limit
= fold_convert (unsigned_type_node
, thread_limit
);
11650 gimplify_expr (&thread_limit
, &bind_body
, NULL
, is_gimple_val
,
11654 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt
),
11655 &bind_body
, &dlist
, ctx
, NULL
);
11656 lower_omp (gimple_omp_body_ptr (teams_stmt
), ctx
);
11657 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt
), &olist
, ctx
);
11658 gimple_seq_add_stmt (&bind_body
, teams_stmt
);
11660 location_t loc
= gimple_location (teams_stmt
);
11661 tree decl
= builtin_decl_explicit (BUILT_IN_GOMP_TEAMS
);
11662 gimple call
= gimple_build_call (decl
, 2, num_teams
, thread_limit
);
11663 gimple_set_location (call
, loc
);
11664 gimple_seq_add_stmt (&bind_body
, call
);
11666 gimple_seq_add_seq (&bind_body
, gimple_omp_body (teams_stmt
));
11667 gimple_omp_set_body (teams_stmt
, NULL
);
11668 gimple_seq_add_seq (&bind_body
, olist
);
11669 gimple_seq_add_seq (&bind_body
, dlist
);
11670 gimple_seq_add_stmt (&bind_body
, gimple_build_omp_return (true));
11671 gimple_bind_set_body (bind
, bind_body
);
11673 pop_gimplify_context (bind
);
11675 gimple_bind_append_vars (bind
, ctx
->block_vars
);
11676 BLOCK_VARS (block
) = ctx
->block_vars
;
11677 if (BLOCK_VARS (block
))
11678 TREE_USED (block
) = 1;
11682 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
11683 regimplified. If DATA is non-NULL, lower_omp_1 is outside
11684 of OMP context, but with task_shared_vars set. */
11687 lower_omp_regimplify_p (tree
*tp
, int *walk_subtrees
,
11692 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
11693 if (TREE_CODE (t
) == VAR_DECL
&& data
== NULL
&& DECL_HAS_VALUE_EXPR_P (t
))
11696 if (task_shared_vars
11698 && bitmap_bit_p (task_shared_vars
, DECL_UID (t
)))
11701 /* If a global variable has been privatized, TREE_CONSTANT on
11702 ADDR_EXPR might be wrong. */
11703 if (data
== NULL
&& TREE_CODE (t
) == ADDR_EXPR
)
11704 recompute_tree_invariant_for_addr_expr (t
);
11706 *walk_subtrees
= !TYPE_P (t
) && !DECL_P (t
);
11711 lower_omp_1 (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
11713 gimple stmt
= gsi_stmt (*gsi_p
);
11714 struct walk_stmt_info wi
;
11717 if (gimple_has_location (stmt
))
11718 input_location
= gimple_location (stmt
);
11720 if (task_shared_vars
)
11721 memset (&wi
, '\0', sizeof (wi
));
11723 /* If we have issued syntax errors, avoid doing any heavy lifting.
11724 Just replace the OMP directives with a NOP to avoid
11725 confusing RTL expansion. */
11726 if (seen_error () && is_gimple_omp (stmt
))
11728 gsi_replace (gsi_p
, gimple_build_nop (), true);
11732 switch (gimple_code (stmt
))
11736 gcond
*cond_stmt
= as_a
<gcond
*> (stmt
);
11737 if ((ctx
|| task_shared_vars
)
11738 && (walk_tree (gimple_cond_lhs_ptr (cond_stmt
),
11739 lower_omp_regimplify_p
,
11740 ctx
? NULL
: &wi
, NULL
)
11741 || walk_tree (gimple_cond_rhs_ptr (cond_stmt
),
11742 lower_omp_regimplify_p
,
11743 ctx
? NULL
: &wi
, NULL
)))
11744 gimple_regimplify_operands (cond_stmt
, gsi_p
);
11748 lower_omp (gimple_catch_handler_ptr (as_a
<gcatch
*> (stmt
)), ctx
);
11750 case GIMPLE_EH_FILTER
:
11751 lower_omp (gimple_eh_filter_failure_ptr (stmt
), ctx
);
11754 lower_omp (gimple_try_eval_ptr (stmt
), ctx
);
11755 lower_omp (gimple_try_cleanup_ptr (stmt
), ctx
);
11757 case GIMPLE_TRANSACTION
:
11758 lower_omp (gimple_transaction_body_ptr (
11759 as_a
<gtransaction
*> (stmt
)),
11763 lower_omp (gimple_bind_body_ptr (as_a
<gbind
*> (stmt
)), ctx
);
11765 case GIMPLE_OMP_PARALLEL
:
11766 case GIMPLE_OMP_TASK
:
11767 ctx
= maybe_lookup_ctx (stmt
);
11769 if (ctx
->cancellable
)
11770 ctx
->cancel_label
= create_artificial_label (UNKNOWN_LOCATION
);
11771 lower_omp_taskreg (gsi_p
, ctx
);
11773 case GIMPLE_OMP_FOR
:
11774 ctx
= maybe_lookup_ctx (stmt
);
11776 if (ctx
->cancellable
)
11777 ctx
->cancel_label
= create_artificial_label (UNKNOWN_LOCATION
);
11778 lower_omp_for (gsi_p
, ctx
);
11780 case GIMPLE_OMP_SECTIONS
:
11781 ctx
= maybe_lookup_ctx (stmt
);
11783 if (ctx
->cancellable
)
11784 ctx
->cancel_label
= create_artificial_label (UNKNOWN_LOCATION
);
11785 lower_omp_sections (gsi_p
, ctx
);
11787 case GIMPLE_OMP_SINGLE
:
11788 ctx
= maybe_lookup_ctx (stmt
);
11790 lower_omp_single (gsi_p
, ctx
);
11792 case GIMPLE_OMP_MASTER
:
11793 ctx
= maybe_lookup_ctx (stmt
);
11795 lower_omp_master (gsi_p
, ctx
);
11797 case GIMPLE_OMP_TASKGROUP
:
11798 ctx
= maybe_lookup_ctx (stmt
);
11800 lower_omp_taskgroup (gsi_p
, ctx
);
11802 case GIMPLE_OMP_ORDERED
:
11803 ctx
= maybe_lookup_ctx (stmt
);
11805 lower_omp_ordered (gsi_p
, ctx
);
11807 case GIMPLE_OMP_CRITICAL
:
11808 ctx
= maybe_lookup_ctx (stmt
);
11810 lower_omp_critical (gsi_p
, ctx
);
11812 case GIMPLE_OMP_ATOMIC_LOAD
:
11813 if ((ctx
|| task_shared_vars
)
11814 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
11815 as_a
<gomp_atomic_load
*> (stmt
)),
11816 lower_omp_regimplify_p
, ctx
? NULL
: &wi
, NULL
))
11817 gimple_regimplify_operands (stmt
, gsi_p
);
11819 case GIMPLE_OMP_TARGET
:
11820 ctx
= maybe_lookup_ctx (stmt
);
11822 lower_omp_target (gsi_p
, ctx
);
11824 case GIMPLE_OMP_TEAMS
:
11825 ctx
= maybe_lookup_ctx (stmt
);
11827 lower_omp_teams (gsi_p
, ctx
);
11831 call_stmt
= as_a
<gcall
*> (stmt
);
11832 fndecl
= gimple_call_fndecl (call_stmt
);
11834 && DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
11835 switch (DECL_FUNCTION_CODE (fndecl
))
11837 case BUILT_IN_GOMP_BARRIER
:
11841 case BUILT_IN_GOMP_CANCEL
:
11842 case BUILT_IN_GOMP_CANCELLATION_POINT
:
11845 if (gimple_code (cctx
->stmt
) == GIMPLE_OMP_SECTION
)
11846 cctx
= cctx
->outer
;
11847 gcc_assert (gimple_call_lhs (call_stmt
) == NULL_TREE
);
11848 if (!cctx
->cancellable
)
11850 if (DECL_FUNCTION_CODE (fndecl
)
11851 == BUILT_IN_GOMP_CANCELLATION_POINT
)
11853 stmt
= gimple_build_nop ();
11854 gsi_replace (gsi_p
, stmt
, false);
11858 if (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_GOMP_BARRIER
)
11860 fndecl
= builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL
);
11861 gimple_call_set_fndecl (call_stmt
, fndecl
);
11862 gimple_call_set_fntype (call_stmt
, TREE_TYPE (fndecl
));
11865 lhs
= create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl
)));
11866 gimple_call_set_lhs (call_stmt
, lhs
);
11867 tree fallthru_label
;
11868 fallthru_label
= create_artificial_label (UNKNOWN_LOCATION
);
11870 g
= gimple_build_label (fallthru_label
);
11871 gsi_insert_after (gsi_p
, g
, GSI_SAME_STMT
);
11872 g
= gimple_build_cond (NE_EXPR
, lhs
,
11873 fold_convert (TREE_TYPE (lhs
),
11874 boolean_false_node
),
11875 cctx
->cancel_label
, fallthru_label
);
11876 gsi_insert_after (gsi_p
, g
, GSI_SAME_STMT
);
11883 if ((ctx
|| task_shared_vars
)
11884 && walk_gimple_op (stmt
, lower_omp_regimplify_p
,
11887 /* Just remove clobbers, this should happen only if we have
11888 "privatized" local addressable variables in SIMD regions,
11889 the clobber isn't needed in that case and gimplifying address
11890 of the ARRAY_REF into a pointer and creating MEM_REF based
11891 clobber would create worse code than we get with the clobber
11893 if (gimple_clobber_p (stmt
))
11895 gsi_replace (gsi_p
, gimple_build_nop (), true);
11898 gimple_regimplify_operands (stmt
, gsi_p
);
11905 lower_omp (gimple_seq
*body
, omp_context
*ctx
)
11907 location_t saved_location
= input_location
;
11908 gimple_stmt_iterator gsi
;
11909 for (gsi
= gsi_start (*body
); !gsi_end_p (gsi
); gsi_next (&gsi
))
11910 lower_omp_1 (&gsi
, ctx
);
11911 /* During gimplification, we haven't folded statments inside offloading
11912 regions (gimplify.c:maybe_fold_stmt); do that now. */
11913 if (target_nesting_level
)
11914 for (gsi
= gsi_start (*body
); !gsi_end_p (gsi
); gsi_next (&gsi
))
11916 input_location
= saved_location
;
11919 /* Main entry point. */
11921 static unsigned int
11922 execute_lower_omp (void)
11928 /* This pass always runs, to provide PROP_gimple_lomp.
11929 But often, there is nothing to do. */
11930 if (flag_cilkplus
== 0 && flag_openacc
== 0 && flag_openmp
== 0
11931 && flag_openmp_simd
== 0)
11934 all_contexts
= splay_tree_new (splay_tree_compare_pointers
, 0,
11935 delete_omp_context
);
11937 body
= gimple_body (current_function_decl
);
11938 scan_omp (&body
, NULL
);
11939 gcc_assert (taskreg_nesting_level
== 0);
11940 FOR_EACH_VEC_ELT (taskreg_contexts
, i
, ctx
)
11941 finish_taskreg_scan (ctx
);
11942 taskreg_contexts
.release ();
11944 if (all_contexts
->root
)
11946 if (task_shared_vars
)
11947 push_gimplify_context ();
11948 lower_omp (&body
, NULL
);
11949 if (task_shared_vars
)
11950 pop_gimplify_context (NULL
);
11955 splay_tree_delete (all_contexts
);
11956 all_contexts
= NULL
;
11958 BITMAP_FREE (task_shared_vars
);
11964 const pass_data pass_data_lower_omp
=
11966 GIMPLE_PASS
, /* type */
11967 "omplower", /* name */
11968 OPTGROUP_NONE
, /* optinfo_flags */
11969 TV_NONE
, /* tv_id */
11970 PROP_gimple_any
, /* properties_required */
11971 PROP_gimple_lomp
, /* properties_provided */
11972 0, /* properties_destroyed */
11973 0, /* todo_flags_start */
11974 0, /* todo_flags_finish */
11977 class pass_lower_omp
: public gimple_opt_pass
11980 pass_lower_omp (gcc::context
*ctxt
)
11981 : gimple_opt_pass (pass_data_lower_omp
, ctxt
)
11984 /* opt_pass methods: */
11985 virtual unsigned int execute (function
*) { return execute_lower_omp (); }
11987 }; // class pass_lower_omp
11989 } // anon namespace
11992 make_pass_lower_omp (gcc::context
*ctxt
)
11994 return new pass_lower_omp (ctxt
);
11997 /* The following is a utility to diagnose structured block violations.
11998 It is not part of the "omplower" pass, as that's invoked too late. It
11999 should be invoked by the respective front ends after gimplification. */
12001 static splay_tree all_labels
;
12003 /* Check for mismatched contexts and generate an error if needed. Return
12004 true if an error is detected. */
12007 diagnose_sb_0 (gimple_stmt_iterator
*gsi_p
,
12008 gimple branch_ctx
, gimple label_ctx
)
12010 gcc_checking_assert (!branch_ctx
|| is_gimple_omp (branch_ctx
));
12011 gcc_checking_assert (!label_ctx
|| is_gimple_omp (label_ctx
));
12013 if (label_ctx
== branch_ctx
)
12016 const char* kind
= NULL
;
12021 && gimple_code (branch_ctx
) == GIMPLE_OMP_FOR
12022 && gimple_omp_for_kind (branch_ctx
) == GF_OMP_FOR_KIND_CILKSIMD
)
12024 && gimple_code (label_ctx
) == GIMPLE_OMP_FOR
12025 && gimple_omp_for_kind (label_ctx
) == GF_OMP_FOR_KIND_CILKSIMD
))
12026 kind
= "Cilk Plus";
12030 if ((branch_ctx
&& is_gimple_omp_oacc (branch_ctx
))
12031 || (label_ctx
&& is_gimple_omp_oacc (label_ctx
)))
12033 gcc_checking_assert (kind
== NULL
);
12039 gcc_checking_assert (flag_openmp
);
12044 Previously we kept track of the label's entire context in diagnose_sb_[12]
12045 so we could traverse it and issue a correct "exit" or "enter" error
12046 message upon a structured block violation.
12048 We built the context by building a list with tree_cons'ing, but there is
12049 no easy counterpart in gimple tuples. It seems like far too much work
12050 for issuing exit/enter error messages. If someone really misses the
12051 distinct error message... patches welcome.
12055 /* Try to avoid confusing the user by producing and error message
12056 with correct "exit" or "enter" verbiage. We prefer "exit"
12057 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
12058 if (branch_ctx
== NULL
)
12064 if (TREE_VALUE (label_ctx
) == branch_ctx
)
12069 label_ctx
= TREE_CHAIN (label_ctx
);
12074 error ("invalid exit from %s structured block", kind
);
12076 error ("invalid entry to %s structured block", kind
);
12079 /* If it's obvious we have an invalid entry, be specific about the error. */
12080 if (branch_ctx
== NULL
)
12081 error ("invalid entry to %s structured block", kind
);
12084 /* Otherwise, be vague and lazy, but efficient. */
12085 error ("invalid branch to/from %s structured block", kind
);
12088 gsi_replace (gsi_p
, gimple_build_nop (), false);
12092 /* Pass 1: Create a minimal tree of structured blocks, and record
12093 where each label is found. */
12096 diagnose_sb_1 (gimple_stmt_iterator
*gsi_p
, bool *handled_ops_p
,
12097 struct walk_stmt_info
*wi
)
12099 gimple context
= (gimple
) wi
->info
;
12100 gimple inner_context
;
12101 gimple stmt
= gsi_stmt (*gsi_p
);
12103 *handled_ops_p
= true;
12105 switch (gimple_code (stmt
))
12109 case GIMPLE_OMP_PARALLEL
:
12110 case GIMPLE_OMP_TASK
:
12111 case GIMPLE_OMP_SECTIONS
:
12112 case GIMPLE_OMP_SINGLE
:
12113 case GIMPLE_OMP_SECTION
:
12114 case GIMPLE_OMP_MASTER
:
12115 case GIMPLE_OMP_ORDERED
:
12116 case GIMPLE_OMP_CRITICAL
:
12117 case GIMPLE_OMP_TARGET
:
12118 case GIMPLE_OMP_TEAMS
:
12119 case GIMPLE_OMP_TASKGROUP
:
12120 /* The minimal context here is just the current OMP construct. */
12121 inner_context
= stmt
;
12122 wi
->info
= inner_context
;
12123 walk_gimple_seq (gimple_omp_body (stmt
), diagnose_sb_1
, NULL
, wi
);
12124 wi
->info
= context
;
12127 case GIMPLE_OMP_FOR
:
12128 inner_context
= stmt
;
12129 wi
->info
= inner_context
;
12130 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
12132 walk_gimple_seq (gimple_omp_for_pre_body (stmt
),
12133 diagnose_sb_1
, NULL
, wi
);
12134 walk_gimple_seq (gimple_omp_body (stmt
), diagnose_sb_1
, NULL
, wi
);
12135 wi
->info
= context
;
12139 splay_tree_insert (all_labels
,
12140 (splay_tree_key
) gimple_label_label (
12141 as_a
<glabel
*> (stmt
)),
12142 (splay_tree_value
) context
);
12152 /* Pass 2: Check each branch and see if its context differs from that of
12153 the destination label's context. */
12156 diagnose_sb_2 (gimple_stmt_iterator
*gsi_p
, bool *handled_ops_p
,
12157 struct walk_stmt_info
*wi
)
12159 gimple context
= (gimple
) wi
->info
;
12161 gimple stmt
= gsi_stmt (*gsi_p
);
12163 *handled_ops_p
= true;
12165 switch (gimple_code (stmt
))
12169 case GIMPLE_OMP_PARALLEL
:
12170 case GIMPLE_OMP_TASK
:
12171 case GIMPLE_OMP_SECTIONS
:
12172 case GIMPLE_OMP_SINGLE
:
12173 case GIMPLE_OMP_SECTION
:
12174 case GIMPLE_OMP_MASTER
:
12175 case GIMPLE_OMP_ORDERED
:
12176 case GIMPLE_OMP_CRITICAL
:
12177 case GIMPLE_OMP_TARGET
:
12178 case GIMPLE_OMP_TEAMS
:
12179 case GIMPLE_OMP_TASKGROUP
:
12181 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt
), diagnose_sb_2
, NULL
, wi
);
12182 wi
->info
= context
;
12185 case GIMPLE_OMP_FOR
:
12187 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
12189 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt
),
12190 diagnose_sb_2
, NULL
, wi
);
12191 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt
), diagnose_sb_2
, NULL
, wi
);
12192 wi
->info
= context
;
12197 gcond
*cond_stmt
= as_a
<gcond
*> (stmt
);
12198 tree lab
= gimple_cond_true_label (cond_stmt
);
12201 n
= splay_tree_lookup (all_labels
,
12202 (splay_tree_key
) lab
);
12203 diagnose_sb_0 (gsi_p
, context
,
12204 n
? (gimple
) n
->value
: NULL
);
12206 lab
= gimple_cond_false_label (cond_stmt
);
12209 n
= splay_tree_lookup (all_labels
,
12210 (splay_tree_key
) lab
);
12211 diagnose_sb_0 (gsi_p
, context
,
12212 n
? (gimple
) n
->value
: NULL
);
12219 tree lab
= gimple_goto_dest (stmt
);
12220 if (TREE_CODE (lab
) != LABEL_DECL
)
12223 n
= splay_tree_lookup (all_labels
, (splay_tree_key
) lab
);
12224 diagnose_sb_0 (gsi_p
, context
, n
? (gimple
) n
->value
: NULL
);
12228 case GIMPLE_SWITCH
:
12230 gswitch
*switch_stmt
= as_a
<gswitch
*> (stmt
);
12232 for (i
= 0; i
< gimple_switch_num_labels (switch_stmt
); ++i
)
12234 tree lab
= CASE_LABEL (gimple_switch_label (switch_stmt
, i
));
12235 n
= splay_tree_lookup (all_labels
, (splay_tree_key
) lab
);
12236 if (n
&& diagnose_sb_0 (gsi_p
, context
, (gimple
) n
->value
))
12242 case GIMPLE_RETURN
:
12243 diagnose_sb_0 (gsi_p
, context
, NULL
);
/* NOTE(review): lossy extraction -- the function's return type line,
   the `int *region_idx' parameter line, the enclosing switch header,
   `break;' statements, closing braces and the final `return fallthru;'
   are missing from the visible text; restore from the original
   omp-low.c before compiling.
   Maintains the omp_region tree while tree-cfg builds edges: directives
   that open a region (PARALLEL, TASK, FOR, SECTIONS, TARGET, ...) push
   a new region via new_omp_region; GIMPLE_OMP_RETURN records the exit
   block and pops to the outer region; GIMPLE_OMP_CONTINUE wires the
   loop-back edges for FOR and the dispatch/exit edges for SECTIONS.  */
12253 /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
12256 make_gimple_omp_edges (basic_block bb
, struct omp_region
**region
,
12259 gimple last
= last_stmt (bb
);
12260 enum gimple_code code
= gimple_code (last
);
12261 struct omp_region
*cur_region
= *region
;
12262 bool fallthru
= false;
12266 case GIMPLE_OMP_PARALLEL
:
12267 case GIMPLE_OMP_TASK
:
12268 case GIMPLE_OMP_FOR
:
12269 case GIMPLE_OMP_SINGLE
:
12270 case GIMPLE_OMP_TEAMS
:
12271 case GIMPLE_OMP_MASTER
:
12272 case GIMPLE_OMP_TASKGROUP
:
12273 case GIMPLE_OMP_ORDERED
:
12274 case GIMPLE_OMP_CRITICAL
:
12275 case GIMPLE_OMP_SECTION
:
12276 cur_region
= new_omp_region (bb
, code
, cur_region
);
12280 case GIMPLE_OMP_TARGET
:
12281 cur_region
= new_omp_region (bb
, code
, cur_region
);
/* Stand-alone TARGET forms (update / enter-exit data) carry no body,
   so their region is closed immediately.  */
12283 switch (gimple_omp_target_kind (last
))
12285 case GF_OMP_TARGET_KIND_REGION
:
12286 case GF_OMP_TARGET_KIND_DATA
:
12287 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
12288 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
12289 case GF_OMP_TARGET_KIND_OACC_DATA
:
12291 case GF_OMP_TARGET_KIND_UPDATE
:
12292 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
12293 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
12294 cur_region
= cur_region
->outer
;
12297 gcc_unreachable ();
12301 case GIMPLE_OMP_SECTIONS
:
12302 cur_region
= new_omp_region (bb
, code
, cur_region
);
12306 case GIMPLE_OMP_SECTIONS_SWITCH
:
12310 case GIMPLE_OMP_ATOMIC_LOAD
:
12311 case GIMPLE_OMP_ATOMIC_STORE
:
12315 case GIMPLE_OMP_RETURN
:
12316 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
12317 somewhere other than the next block. This will be
12319 cur_region
->exit
= bb
;
12320 if (cur_region
->type
== GIMPLE_OMP_TASK
)
12321 /* Add an edge corresponding to not scheduling the task
12323 make_edge (cur_region
->entry
, bb
, EDGE_ABNORMAL
);
12324 fallthru
= cur_region
->type
!= GIMPLE_OMP_SECTION
;
12325 cur_region
= cur_region
->outer
;
12328 case GIMPLE_OMP_CONTINUE
:
12329 cur_region
->cont
= bb
;
12330 switch (cur_region
->type
)
12332 case GIMPLE_OMP_FOR
:
12333 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
12334 succs edges as abnormal to prevent splitting
12336 single_succ_edge (cur_region
->entry
)->flags
|= EDGE_ABNORMAL
;
12337 /* Make the loopback edge. */
12338 make_edge (bb
, single_succ (cur_region
->entry
),
12341 /* Create an edge from GIMPLE_OMP_FOR to exit, which
12342 corresponds to the case that the body of the loop
12343 is not executed at all. */
12344 make_edge (cur_region
->entry
, bb
->next_bb
, EDGE_ABNORMAL
);
12345 make_edge (bb
, bb
->next_bb
, EDGE_FALLTHRU
| EDGE_ABNORMAL
);
12349 case GIMPLE_OMP_SECTIONS
:
12350 /* Wire up the edges into and out of the nested sections. */
12352 basic_block switch_bb
= single_succ (cur_region
->entry
);
12354 struct omp_region
*i
;
12355 for (i
= cur_region
->inner
; i
; i
= i
->next
)
12357 gcc_assert (i
->type
== GIMPLE_OMP_SECTION
);
12358 make_edge (switch_bb
, i
->entry
, 0);
12359 make_edge (i
->exit
, bb
, EDGE_FALLTHRU
);
12362 /* Make the loopback edge to the block with
12363 GIMPLE_OMP_SECTIONS_SWITCH. */
12364 make_edge (bb
, switch_bb
, 0);
12366 /* Make the edge from the switch to exit. */
12367 make_edge (switch_bb
, bb
->next_bb
, 0);
12372 case GIMPLE_OMP_TASK
:
12377 gcc_unreachable ();
12382 gcc_unreachable ();
/* Report the entry block of the innermost open region back to the
   caller (used by tree-cfg to key discriminator info).  */
12385 if (*region
!= cur_region
)
12387 *region
= cur_region
;
12389 *region_idx
= cur_region
->entry
->index
;
/* Diagnose invalid jumps into/out of OMP structured blocks.
   Pass 1 (diagnose_sb_1) records every label and the OMP context it
   appears in into the global splay tree `all_labels'; pass 2
   (diagnose_sb_2) re-walks the body checking each branch target
   against its context.  The second walk uses walk_gimple_seq_mod and
   the body is stored back, since diagnose_sb_2 may modify statements.
   NOTE(review): lossy extraction -- the opening brace and the final
   return statement are missing from the visible text.  */
12397 static unsigned int
12398 diagnose_omp_structured_block_errors (void)
12400 struct walk_stmt_info wi
;
12401 gimple_seq body
= gimple_body (current_function_decl
);
12403 all_labels
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
12405 memset (&wi
, 0, sizeof (wi
));
12406 walk_gimple_seq (body
, diagnose_sb_1
, NULL
, &wi
);
12408 memset (&wi
, 0, sizeof (wi
));
/* Locations are wanted so the diagnostics can point at the branch.  */
12409 wi
.want_locations
= true;
12410 walk_gimple_seq_mod (&body
, diagnose_sb_2
, NULL
, &wi
);
12412 gimple_set_body (current_function_decl
, body
);
12414 splay_tree_delete (all_labels
);
/* Pass descriptor for the OMP structured-block diagnostic pass.
   The leading '*' in the name hides it from -fdump-passes listings.
   It requires only PROP_gimple_any and provides/destroys nothing.  */
12422 const pass_data pass_data_diagnose_omp_blocks
=
12424 GIMPLE_PASS
, /* type */
12425 "*diagnose_omp_blocks", /* name */
12426 OPTGROUP_NONE
, /* optinfo_flags */
12427 TV_NONE
, /* tv_id */
12428 PROP_gimple_any
, /* properties_required */
12429 0, /* properties_provided */
12430 0, /* properties_destroyed */
12431 0, /* todo_flags_start */
12432 0, /* todo_flags_finish */
/* gimple_opt_pass wrapper around diagnose_omp_structured_block_errors.
   Gated on any of the OMP-family front-end flags (Cilk Plus, OpenACC,
   OpenMP), so it costs nothing for ordinary compilations.  */
12435 class pass_diagnose_omp_blocks
: public gimple_opt_pass
12438 pass_diagnose_omp_blocks (gcc::context
*ctxt
)
12439 : gimple_opt_pass (pass_data_diagnose_omp_blocks
, ctxt
)
12442 /* opt_pass methods: */
12443 virtual bool gate (function
*)
12445 return flag_cilkplus
|| flag_openacc
|| flag_openmp
;
12447 virtual unsigned int execute (function
*)
12449 return diagnose_omp_structured_block_errors ();
12452 }; // class pass_diagnose_omp_blocks
12454 } // anon namespace
/* Factory used by the pass manager; ownership of the returned pass
   object transfers to the caller.
   NOTE(review): the return-type line (`gimple_opt_pass *') is missing
   from this lossy extraction.  */
12457 make_pass_diagnose_omp_blocks (gcc::context
*ctxt
)
12459 return new pass_diagnose_omp_blocks (ctxt
);
12462 /* SIMD clone supporting code. */
12464 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
12465 of arguments to reserve space for. */
12467 static struct cgraph_simd_clone
*
12468 simd_clone_struct_alloc (int nargs
)
12470 struct cgraph_simd_clone
*clone_info
;
/* The args[] member is a trailing flexible array: allocate header plus
   NARGS argument descriptors in one GC-managed, zeroed chunk.  */
12471 size_t len
= (sizeof (struct cgraph_simd_clone
)
12472 + nargs
* sizeof (struct cgraph_simd_clone_arg
));
12473 clone_info
= (struct cgraph_simd_clone
*)
12474 ggc_internal_cleared_alloc (len
);
/* NOTE(review): the assignment of clone_info->nargs and the `return
   clone_info;' are missing from this lossy extraction.  */
12478 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
/* NOTE(review): return type line and braces are missing from this
   lossy extraction.  The copied size excludes the trailing mask slot
   when FROM->inbranch is set (nargs - inbranch descriptors).  */
12481 simd_clone_struct_copy (struct cgraph_simd_clone
*to
,
12482 struct cgraph_simd_clone
*from
)
12484 memcpy (to
, from
, (sizeof (struct cgraph_simd_clone
)
12485 + ((from
->nargs
- from
->inbranch
)
12486 * sizeof (struct cgraph_simd_clone_arg
))));
12489 /* Return vector of parameter types of function FNDECL. This uses
12490 TYPE_ARG_TYPES if available, otherwise falls back to types of
12491 DECL_ARGUMENTS types. */
/* NOTE(review): return type line, declarations of i/arg, and the final
   `return args;' are missing from this lossy extraction.  */
12494 simd_clone_vector_of_formal_parm_types (tree fndecl
)
12496 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)))
12497 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl
));
/* Fallback path: collect the PARM_DECLs and map each to its type
   in place.  */
12498 vec
<tree
> args
= ipa_get_vector_of_formal_parms (fndecl
);
12501 FOR_EACH_VEC_ELT (args
, i
, arg
)
12502 args
[i
] = TREE_TYPE (args
[i
]);
12506 /* Given a simd function in NODE, extract the simd specific
12507 information from the OMP clauses passed in CLAUSES, and return
12508 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
12509 is set to TRUE if the `inbranch' or `notinbranch' clause specified,
12510 otherwise set to FALSE. */
/* NOTE(review): lossy extraction -- braces, `break;' statements,
   several declarations (t, cilk_clone) and the final return of
   clone_info are missing from the visible text.  */
12512 static struct cgraph_simd_clone
*
12513 simd_clone_clauses_extract (struct cgraph_node
*node
, tree clauses
,
12514 bool *inbranch_specified
)
12516 vec
<tree
> args
= simd_clone_vector_of_formal_parm_types (node
->decl
);
12519 *inbranch_specified
= false;
/* A trailing void_type_node only terminates the TYPE_ARG_TYPES list;
   it is not a real argument.  */
12521 n
= args
.length ();
12522 if (n
> 0 && args
.last () == void_type_node
)
12525 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
12526 be cloned have a distinctive artificial label in addition to "omp
12530 && lookup_attribute ("cilk simd function",
12531 DECL_ATTRIBUTES (node
->decl
)));
12533 /* Allocate one more than needed just in case this is an in-branch
12534 clone which will require a mask argument. */
12535 struct cgraph_simd_clone
*clone_info
= simd_clone_struct_alloc (n
+ 1);
12536 clone_info
->nargs
= n
;
12537 clone_info
->cilk_elemental
= cilk_clone
;
12544 clauses
= TREE_VALUE (clauses
);
12545 if (!clauses
|| TREE_CODE (clauses
) != OMP_CLAUSE
)
/* Walk the `#pragma omp declare simd' clause chain, filling in the
   per-argument descriptors by the argument index stored in each
   clause's decl slot.  */
12548 for (t
= clauses
; t
; t
= OMP_CLAUSE_CHAIN (t
))
12550 switch (OMP_CLAUSE_CODE (t
))
12552 case OMP_CLAUSE_INBRANCH
:
12553 clone_info
->inbranch
= 1;
12554 *inbranch_specified
= true;
12556 case OMP_CLAUSE_NOTINBRANCH
:
12557 clone_info
->inbranch
= 0;
12558 *inbranch_specified
= true;
12560 case OMP_CLAUSE_SIMDLEN
:
12561 clone_info
->simdlen
12562 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t
));
12564 case OMP_CLAUSE_LINEAR
:
12566 tree decl
= OMP_CLAUSE_DECL (t
);
12567 tree step
= OMP_CLAUSE_LINEAR_STEP (t
);
12568 int argno
= TREE_INT_CST_LOW (decl
);
12569 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t
))
/* Variable stride: linear_step holds the index of the uniform
   argument supplying the step, hence the range assert below.  */
12571 clone_info
->args
[argno
].arg_type
12572 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
;
12573 clone_info
->args
[argno
].linear_step
= tree_to_shwi (step
);
12574 gcc_assert (clone_info
->args
[argno
].linear_step
>= 0
12575 && clone_info
->args
[argno
].linear_step
< n
);
12579 if (POINTER_TYPE_P (args
[argno
]))
12580 step
= fold_convert (ssizetype
, step
);
/* Constant-step path: large or zero steps are diagnosed and the
   clause ignored rather than producing a bogus clone.  */
12581 if (!tree_fits_shwi_p (step
))
12583 warning_at (OMP_CLAUSE_LOCATION (t
), 0,
12584 "ignoring large linear step");
12588 else if (integer_zerop (step
))
12590 warning_at (OMP_CLAUSE_LOCATION (t
), 0,
12591 "ignoring zero linear step");
12597 clone_info
->args
[argno
].arg_type
12598 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
;
12599 clone_info
->args
[argno
].linear_step
= tree_to_shwi (step
);
12604 case OMP_CLAUSE_UNIFORM
:
12606 tree decl
= OMP_CLAUSE_DECL (t
);
12607 int argno
= tree_to_uhwi (decl
);
12608 clone_info
->args
[argno
].arg_type
12609 = SIMD_CLONE_ARG_TYPE_UNIFORM
;
12612 case OMP_CLAUSE_ALIGNED
:
12614 tree decl
= OMP_CLAUSE_DECL (t
);
12615 int argno
= tree_to_uhwi (decl
);
12616 clone_info
->args
[argno
].alignment
12617 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t
));
12628 /* Given a SIMD clone in NODE, calculate the characteristic data
12629 type and return the corresponding type. The characteristic data
12630 type is computed as described in the Intel Vector ABI. */
/* NOTE(review): lossy extraction -- the return-type line, the vector
   arg early-return body inside the b) loop, and the final
   `return type;' are missing from the visible text.  */
12633 simd_clone_compute_base_data_type (struct cgraph_node
*node
,
12634 struct cgraph_simd_clone
*clone_info
)
12636 tree type
= integer_type_node
;
12637 tree fndecl
= node
->decl
;
12639 /* a) For non-void function, the characteristic data type is the
12641 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl
))) != VOID_TYPE
)
12642 type
= TREE_TYPE (TREE_TYPE (fndecl
));
12644 /* b) If the function has any non-uniform, non-linear parameters,
12645 then the characteristic data type is the type of the first
12649 vec
<tree
> map
= simd_clone_vector_of_formal_parm_types (fndecl
);
12650 for (unsigned int i
= 0; i
< clone_info
->nargs
; ++i
)
12651 if (clone_info
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_VECTOR
)
12659 /* c) If the characteristic data type determined by a) or b) above
12660 is struct, union, or class type which is pass-by-value (except
12661 for the type that maps to the built-in complex data type), the
12662 characteristic data type is int. */
12663 if (RECORD_OR_UNION_TYPE_P (type
)
12664 && !aggregate_value_p (type
, NULL
)
12665 && TREE_CODE (type
) != COMPLEX_TYPE
)
12666 return integer_type_node
;
12668 /* d) If none of the above three classes is applicable, the
12669 characteristic data type is int. */
12673 /* e) For Intel Xeon Phi native and offload compilation, if the
12674 resulting characteristic data type is 8-bit or 16-bit integer
12675 data type, the characteristic data type is int. */
12676 /* Well, we don't handle Xeon Phi yet. */
/* Produce the Vector-ABI mangled name `_ZGV<isa><mask><simdlen><args>_<base>'
   for NODE's clone described by CLONE_INFO, returning it as an
   IDENTIFIER_NODE -- or, if an identically-mangled clone already
   exists on NODE, presumably NULL_TREE (the duplicate-check body is
   not visible here; confirm against the original source).
   NOTE(review): lossy extraction -- the return-type line, pretty_printer
   declaration, loop-variable declaration, several braces and the
   duplicate-found return are missing from the visible text.  */
12680 simd_clone_mangle (struct cgraph_node
*node
,
12681 struct cgraph_simd_clone
*clone_info
)
12683 char vecsize_mangle
= clone_info
->vecsize_mangle
;
/* 'M' = masked (inbranch) variant, 'N' = unmasked.  */
12684 char mask
= clone_info
->inbranch
? 'M' : 'N';
12685 unsigned int simdlen
= clone_info
->simdlen
;
12689 gcc_assert (vecsize_mangle
&& simdlen
);
12691 pp_string (&pp
, "_ZGV");
12692 pp_character (&pp
, vecsize_mangle
);
12693 pp_character (&pp
, mask
);
12694 pp_decimal_int (&pp
, simdlen
);
/* One letter per argument: u=uniform, l=linear (with optional step,
   'n' prefix for negative), s=linear w/ variable step (step holds the
   uniform arg index), v=vector; 'a<align>' is appended when an aligned
   clause was given.  */
12696 for (n
= 0; n
< clone_info
->nargs
; ++n
)
12698 struct cgraph_simd_clone_arg arg
= clone_info
->args
[n
];
12700 if (arg
.arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
)
12701 pp_character (&pp
, 'u');
12702 else if (arg
.arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
12704 gcc_assert (arg
.linear_step
!= 0);
12705 pp_character (&pp
, 'l');
12706 if (arg
.linear_step
> 1)
12707 pp_unsigned_wide_integer (&pp
, arg
.linear_step
);
12708 else if (arg
.linear_step
< 0)
12710 pp_character (&pp
, 'n');
12711 pp_unsigned_wide_integer (&pp
, (-(unsigned HOST_WIDE_INT
)
12715 else if (arg
.arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
)
12717 pp_character (&pp
, 's');
12718 pp_unsigned_wide_integer (&pp
, arg
.linear_step
);
12721 pp_character (&pp
, 'v');
12724 pp_character (&pp
, 'a');
12725 pp_decimal_int (&pp
, arg
.alignment
);
12729 pp_underscore (&pp
);
12730 const char *str
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node
->decl
));
12733 pp_string (&pp
, str
);
12734 str
= pp_formatted_text (&pp
);
12736 /* If there already is a SIMD clone with the same mangled name, don't
12737 add another one. This can happen e.g. for
12738 #pragma omp declare simd
12739 #pragma omp declare simd simdlen(8)
12740 int foo (int, int);
12741 if the simdlen is assumed to be 8 for the first one, etc. */
12742 for (struct cgraph_node
*clone
= node
->simd_clones
; clone
;
12743 clone
= clone
->simdclone
->next_clone
)
12744 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone
->decl
)),
12748 return get_identifier (str
);
12751 /* Create a simd clone of OLD_NODE and return it. */
/* Two paths: nodes with a definition get a full body clone via
   create_version_clone_with_body (materializing the body first if
   needed); declaration-only nodes get a fresh decl copy and a
   body-less version clone.  Returns NULL on failure.
   NOTE(review): lossy extraction -- braces, the arguments continuing
   the create_version_clone_with_body call, the early `return NULL'
   and the final return of new_node are missing from the visible
   text.  */
12753 static struct cgraph_node
*
12754 simd_clone_create (struct cgraph_node
*old_node
)
12756 struct cgraph_node
*new_node
;
12757 if (old_node
->definition
)
12759 if (!old_node
->has_gimple_body_p ())
12761 old_node
->get_body ();
12762 new_node
= old_node
->create_version_clone_with_body (vNULL
, NULL
, NULL
,
12768 tree old_decl
= old_node
->decl
;
12769 tree new_decl
= copy_node (old_node
->decl
);
12770 DECL_NAME (new_decl
) = clone_function_name (old_decl
, "simdclone");
12771 SET_DECL_ASSEMBLER_NAME (new_decl
, DECL_NAME (new_decl
));
/* Clear state that must not be shared with the original decl.  */
12772 SET_DECL_RTL (new_decl
, NULL
);
12773 DECL_STATIC_CONSTRUCTOR (new_decl
) = 0;
12774 DECL_STATIC_DESTRUCTOR (new_decl
) = 0;
12775 new_node
= old_node
->create_version_clone (new_decl
, vNULL
, NULL
);
12776 symtab
->call_cgraph_insertion_hooks (new_node
);
12778 if (new_node
== NULL
)
12781 TREE_PUBLIC (new_node
->decl
) = TREE_PUBLIC (old_node
->decl
);
12783 /* The function cgraph_function_versioning () will force the new
12784 symbol local. Undo this, and inherit external visibility from
12786 new_node
->local
.local
= old_node
->local
.local
;
12787 new_node
->externally_visible
= old_node
->externally_visible
;
12792 /* Adjust the return type of the given function to its appropriate
12793 vector counterpart. Returns a simd array to be used throughout the
12794 function as a return value. */
/* NOTE(review): lossy extraction -- the return-type line, the
   declaration of `t', early returns for the void/declaration-only
   cases and the final `return retval;' are missing from the visible
   text.  */
12797 simd_clone_adjust_return_type (struct cgraph_node
*node
)
12799 tree fndecl
= node
->decl
;
12800 tree orig_rettype
= TREE_TYPE (TREE_TYPE (fndecl
));
12801 unsigned int veclen
;
12804 /* Adjust the function return type. */
12805 if (orig_rettype
== void_type_node
)
12807 TREE_TYPE (fndecl
) = build_distinct_type_copy (TREE_TYPE (fndecl
));
12808 t
= TREE_TYPE (TREE_TYPE (fndecl
));
/* Pick the target's vector width for the element class, capped at
   simdlen elements.  */
12809 if (INTEGRAL_TYPE_P (t
) || POINTER_TYPE_P (t
))
12810 veclen
= node
->simdclone
->vecsize_int
;
12812 veclen
= node
->simdclone
->vecsize_float
;
12813 veclen
/= GET_MODE_BITSIZE (TYPE_MODE (t
));
12814 if (veclen
> node
->simdclone
->simdlen
)
12815 veclen
= node
->simdclone
->simdlen
;
/* Pointers are vectorized as pointer-sized integers.  */
12816 if (POINTER_TYPE_P (t
))
12817 t
= pointer_sized_int_node
;
/* One vector covers the whole simdlen; otherwise return an array of
   simdlen/veclen narrower vectors.  */
12818 if (veclen
== node
->simdclone
->simdlen
)
12819 t
= build_vector_type (t
, node
->simdclone
->simdlen
);
12822 t
= build_vector_type (t
, veclen
);
12823 t
= build_array_type_nelts (t
, node
->simdclone
->simdlen
/ veclen
);
12825 TREE_TYPE (TREE_TYPE (fndecl
)) = t
;
12826 if (!node
->definition
)
12829 t
= DECL_RESULT (fndecl
);
12830 /* Adjust the DECL_RESULT. */
12831 gcc_assert (TREE_TYPE (t
) != void_type_node
);
12832 TREE_TYPE (t
) = TREE_TYPE (TREE_TYPE (fndecl
));
/* When several vectors are needed, view the result as an array of
   simdlen original-type elements.  */
12835 tree atype
= build_array_type_nelts (orig_rettype
,
12836 node
->simdclone
->simdlen
);
12837 if (veclen
!= node
->simdclone
->simdlen
)
12838 return build1 (VIEW_CONVERT_EXPR
, atype
, t
);
12840 /* Set up a SIMD array to use as the return value. */
12841 tree retval
= create_tmp_var_raw (atype
, "retval");
12842 gimple_add_tmp_var (retval
);
12846 /* Each vector argument has a corresponding array to be used locally
12847 as part of the eventual loop. Create such temporary array and
12850 PREFIX is the prefix to be used for the temporary.
12852 TYPE is the inner element type.
12854 SIMDLEN is the number of elements. */
/* NOTE(review): the return-type line and the final `return avar;' are
   missing from this lossy extraction.  */
12857 create_tmp_simd_array (const char *prefix
, tree type
, int simdlen
)
12859 tree atype
= build_array_type_nelts (type
, simdlen
);
12860 tree avar
= create_tmp_var_raw (atype
, prefix
);
12861 gimple_add_tmp_var (avar
);
12865 /* Modify the function argument types to their corresponding vector
12866 counterparts if appropriate. Also, create one array for each simd
12867 argument to be used locally when using the function arguments as
12870 NODE is the function whose arguments are to be adjusted.
12872 Returns an adjustment vector that will be filled describing how the
12873 argument types will be adjusted. */
/* NOTE(review): lossy extraction -- braces, the `vec<tree> args'
   declaration, the base_type declaration for the mask argument, the
   `ptype' declaration/else-branch in the rebuild loop, and several
   `continue;'/brace lines are missing from the visible text.  */
12875 static ipa_parm_adjustment_vec
12876 simd_clone_adjust_argument_types (struct cgraph_node
*node
)
12879 ipa_parm_adjustment_vec adjustments
;
/* For definitions we have real PARM_DECLs; for declarations only the
   parameter types.  parm_type below abstracts over the two cases.  */
12881 if (node
->definition
)
12882 args
= ipa_get_vector_of_formal_parms (node
->decl
);
12884 args
= simd_clone_vector_of_formal_parm_types (node
->decl
);
12885 adjustments
.create (args
.length ());
12886 unsigned i
, j
, veclen
;
12887 struct ipa_parm_adjustment adj
;
12888 for (i
= 0; i
< node
->simdclone
->nargs
; ++i
)
12890 memset (&adj
, 0, sizeof (adj
));
12891 tree parm
= args
[i
];
12892 tree parm_type
= node
->definition
? TREE_TYPE (parm
) : parm
;
12893 adj
.base_index
= i
;
12896 node
->simdclone
->args
[i
].orig_arg
= node
->definition
? parm
: NULL_TREE
;
12897 node
->simdclone
->args
[i
].orig_type
= parm_type
;
12899 if (node
->simdclone
->args
[i
].arg_type
!= SIMD_CLONE_ARG_TYPE_VECTOR
)
12901 /* No adjustment necessary for scalar arguments. */
12902 adj
.op
= IPA_PARM_OP_COPY
;
/* Vector argument: compute the per-vector element count the same way
   as for the return type (int vs. float register width, capped at
   simdlen).  */
12906 if (INTEGRAL_TYPE_P (parm_type
) || POINTER_TYPE_P (parm_type
))
12907 veclen
= node
->simdclone
->vecsize_int
;
12909 veclen
= node
->simdclone
->vecsize_float
;
12910 veclen
/= GET_MODE_BITSIZE (TYPE_MODE (parm_type
));
12911 if (veclen
> node
->simdclone
->simdlen
)
12912 veclen
= node
->simdclone
->simdlen
;
12913 adj
.arg_prefix
= "simd";
12914 if (POINTER_TYPE_P (parm_type
))
12915 adj
.type
= build_vector_type (pointer_sized_int_node
, veclen
);
12917 adj
.type
= build_vector_type (parm_type
, veclen
);
12918 node
->simdclone
->args
[i
].vector_type
= adj
.type
;
/* If one vector does not cover simdlen lanes, push extra NEW
   adjustments so the argument is split across several vectors.  */
12919 for (j
= veclen
; j
< node
->simdclone
->simdlen
; j
+= veclen
)
12921 adjustments
.safe_push (adj
);
12924 memset (&adj
, 0, sizeof (adj
));
12925 adj
.op
= IPA_PARM_OP_NEW
;
12926 adj
.arg_prefix
= "simd";
12927 adj
.base_index
= i
;
12928 adj
.type
= node
->simdclone
->args
[i
].vector_type
;
/* Local simd array used inside the clone's loop body in place of the
   original scalar parameter.  */
12932 if (node
->definition
)
12933 node
->simdclone
->args
[i
].simd_array
12934 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm
)),
12935 parm_type
, node
->simdclone
->simdlen
);
12937 adjustments
.safe_push (adj
);
/* In-branch clones take one extra trailing mask argument, typed by
   the characteristic data type of the origin function.  */
12940 if (node
->simdclone
->inbranch
)
12943 = simd_clone_compute_base_data_type (node
->simdclone
->origin
,
12946 memset (&adj
, 0, sizeof (adj
));
12947 adj
.op
= IPA_PARM_OP_NEW
;
12948 adj
.arg_prefix
= "mask";
12950 adj
.base_index
= i
;
12951 if (INTEGRAL_TYPE_P (base_type
) || POINTER_TYPE_P (base_type
))
12952 veclen
= node
->simdclone
->vecsize_int
;
12954 veclen
= node
->simdclone
->vecsize_float
;
12955 veclen
/= GET_MODE_BITSIZE (TYPE_MODE (base_type
));
12956 if (veclen
> node
->simdclone
->simdlen
)
12957 veclen
= node
->simdclone
->simdlen
;
12958 if (POINTER_TYPE_P (base_type
))
12959 adj
.type
= build_vector_type (pointer_sized_int_node
, veclen
);
12961 adj
.type
= build_vector_type (base_type
, veclen
);
12962 adjustments
.safe_push (adj
);
12964 for (j
= veclen
; j
< node
->simdclone
->simdlen
; j
+= veclen
)
12965 adjustments
.safe_push (adj
);
12967 /* We have previously allocated one extra entry for the mask. Use
12969 struct cgraph_simd_clone
*sc
= node
->simdclone
;
12971 if (node
->definition
)
12973 sc
->args
[i
].orig_arg
12974 = build_decl (UNKNOWN_LOCATION
, PARM_DECL
, NULL
, base_type
);
12975 sc
->args
[i
].simd_array
12976 = create_tmp_simd_array ("mask", base_type
, sc
->simdlen
);
12978 sc
->args
[i
].orig_type
= base_type
;
12979 sc
->args
[i
].arg_type
= SIMD_CLONE_ARG_TYPE_MASK
;
/* Definitions update real parameters; declarations only get a fresh
   TYPE_ARG_TYPES list rebuilt from the adjustments.  */
12982 if (node
->definition
)
12983 ipa_modify_formal_parameters (node
->decl
, adjustments
);
12986 tree new_arg_types
= NULL_TREE
, new_reversed
;
12987 bool last_parm_void
= false;
12988 if (args
.length () > 0 && args
.last () == void_type_node
)
12989 last_parm_void
= true;
12991 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node
->decl
)));
12992 j
= adjustments
.length ();
12993 for (i
= 0; i
< j
; i
++)
12995 struct ipa_parm_adjustment
*adj
= &adjustments
[i
];
12997 if (adj
->op
== IPA_PARM_OP_COPY
)
12998 ptype
= args
[adj
->base_index
];
13001 new_arg_types
= tree_cons (NULL_TREE
, ptype
, new_arg_types
);
13003 new_reversed
= nreverse (new_arg_types
);
/* Re-terminate the list with void_list_node if the original ended in
   one (i.e. the function is not varargs).  */
13004 if (last_parm_void
)
13007 TREE_CHAIN (new_arg_types
) = void_list_node
;
13009 new_reversed
= void_list_node
;
13012 tree new_type
= build_distinct_type_copy (TREE_TYPE (node
->decl
));
13013 TYPE_ARG_TYPES (new_type
) = new_reversed
;
13014 TREE_TYPE (node
->decl
) = new_type
;
13016 adjustments
.release ();
13019 return adjustments
;
13022 /* Initialize and copy the function arguments in NODE to their
13023 corresponding local simd arrays. Returns a fresh gimple_seq with
13024 the instruction sequence generated. */
/* NOTE(review): lossy extraction -- the return-type line, the loop's
   termination clause, braces, the `elemsize' declaration head and the
   final `return seq;' are missing from the visible text.  */
13027 simd_clone_init_simd_arrays (struct cgraph_node
*node
,
13028 ipa_parm_adjustment_vec adjustments
)
13030 gimple_seq seq
= NULL
;
13031 unsigned i
= 0, j
= 0, k
;
/* i indexes simdclone->args, j indexes ADJUSTMENTS; they diverge when
   one original argument was split across several vector parms.  */
13033 for (tree arg
= DECL_ARGUMENTS (node
->decl
);
13035 arg
= DECL_CHAIN (arg
), i
++, j
++)
13037 if (adjustments
[j
].op
== IPA_PARM_OP_COPY
)
13040 node
->simdclone
->args
[i
].vector_arg
= arg
;
13042 tree array
= node
->simdclone
->args
[i
].simd_array
;
/* Single full-width vector: store it into the simd array through a
   MEM_REF at offset 0.  */
13043 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg
)) == node
->simdclone
->simdlen
)
13045 tree ptype
= build_pointer_type (TREE_TYPE (TREE_TYPE (array
)));
13046 tree ptr
= build_fold_addr_expr (array
);
13047 tree t
= build2 (MEM_REF
, TREE_TYPE (arg
), ptr
,
13048 build_int_cst (ptype
, 0));
13049 t
= build2 (MODIFY_EXPR
, TREE_TYPE (t
), t
, arg
);
13050 gimplify_and_add (t
, &seq
);
/* Argument split across multiple narrower vector parms: store each
   chunk at its byte offset, advancing ARG along DECL_CHAIN.  */
13054 unsigned int simdlen
= TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg
));
13055 tree ptype
= build_pointer_type (TREE_TYPE (TREE_TYPE (array
)));
13056 for (k
= 0; k
< node
->simdclone
->simdlen
; k
+= simdlen
)
13058 tree ptr
= build_fold_addr_expr (array
);
13062 arg
= DECL_CHAIN (arg
);
13066 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg
))));
13067 tree t
= build2 (MEM_REF
, TREE_TYPE (arg
), ptr
,
13068 build_int_cst (ptype
, k
* elemsize
));
13069 t
= build2 (MODIFY_EXPR
, TREE_TYPE (t
), t
, arg
);
13070 gimplify_and_add (t
, &seq
);
13077 /* Callback info for ipa_simd_modify_stmt_ops below. */
/* NOTE(review): the struct's tail (the `stmt' member and the
   `modified' flag declaration referenced below, plus the closing
   brace) is missing from this lossy extraction.  */
13079 struct modify_stmt_info
{
13080 ipa_parm_adjustment_vec adjustments
;
13082 /* True if the parent statement was modified by
13083 ipa_simd_modify_stmt_ops. */
13087 /* Callback for walk_gimple_op.
13089 Adjust operands from a given statement as specified in the
13090 adjustments vector in the callback data. */
/* Replaces each PARM_DECL use with the simd-array element recorded in
   the adjustment's new_decl; when the replacement cannot be used in
   place (e.g. inside an ADDR_EXPR or with a mismatched type), emits a
   helper assignment / VIEW_CONVERT_EXPR before the statement.
   NOTE(review): lossy extraction -- the return-type line, several
   conditionals and braces, `gimple stmt' declaration, and the
   NULL_TREE returns are missing from the visible text.  */
13093 ipa_simd_modify_stmt_ops (tree
*tp
, int *walk_subtrees
, void *data
)
13095 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
13096 struct modify_stmt_info
*info
= (struct modify_stmt_info
*) wi
->info
;
13097 tree
*orig_tp
= tp
;
/* Look through a taken address so &parm is also remapped.  */
13098 if (TREE_CODE (*tp
) == ADDR_EXPR
)
13099 tp
= &TREE_OPERAND (*tp
, 0);
13100 struct ipa_parm_adjustment
*cand
= NULL
;
13101 if (TREE_CODE (*tp
) == PARM_DECL
)
13102 cand
= ipa_get_adjustment_candidate (&tp
, NULL
, info
->adjustments
, true);
13106 *walk_subtrees
= 0;
13109 tree repl
= NULL_TREE
;
13111 repl
= unshare_expr (cand
->new_decl
);
/* Recurse manually, preserving the caller-visible modified flag while
   detecting whether the subtree itself changed.  */
13116 *walk_subtrees
= 0;
13117 bool modified
= info
->modified
;
13118 info
->modified
= false;
13119 walk_tree (tp
, ipa_simd_modify_stmt_ops
, wi
, wi
->pset
);
13120 if (!info
->modified
)
13122 info
->modified
= modified
;
13125 info
->modified
= modified
;
13134 repl
= build_fold_addr_expr (repl
);
/* For debug statements bind through a DEBUG_EXPR_DECL instead of
   inserting executable code.  */
13136 if (is_gimple_debug (info
->stmt
))
13138 tree vexpr
= make_node (DEBUG_EXPR_DECL
);
13139 stmt
= gimple_build_debug_source_bind (vexpr
, repl
, NULL
);
13140 DECL_ARTIFICIAL (vexpr
) = 1;
13141 TREE_TYPE (vexpr
) = TREE_TYPE (repl
);
13142 DECL_MODE (vexpr
) = TYPE_MODE (TREE_TYPE (repl
));
13147 stmt
= gimple_build_assign (make_ssa_name (TREE_TYPE (repl
)), repl
);
13148 repl
= gimple_assign_lhs (stmt
);
13150 gimple_stmt_iterator gsi
= gsi_for_stmt (info
->stmt
);
13151 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
13154 else if (!useless_type_conversion_p (TREE_TYPE (*tp
), TREE_TYPE (repl
)))
13156 tree vce
= build1 (VIEW_CONVERT_EXPR
, TREE_TYPE (*tp
), repl
);
13162 info
->modified
= true;
13166 /* Traverse the function body and perform all modifications as
13167 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
13168 modified such that the replacement/reduction value will now be an
13169 offset into the corresponding simd_array.
13171 This function will replace all function argument uses with their
13172 corresponding simd array elements, and adjust the return values
/* NOTE(review): lossy extraction -- the return-type line, the ITER
   index inside the ARRAY_REF construction, `basic_block bb' /
   `tree base_var' declarations, the walk_stmt_info wiring of
   info/stmt, braces and the gsi_next advance are missing from the
   visible text.  */
13176 ipa_simd_modify_function_body (struct cgraph_node
*node
,
13177 ipa_parm_adjustment_vec adjustments
,
13178 tree retval_array
, tree iter
)
13181 unsigned int i
, j
, l
;
13183 /* Re-use the adjustments array, but this time use it to replace
13184 every function argument use to an offset into the corresponding
13186 for (i
= 0, j
= 0; i
< node
->simdclone
->nargs
; ++i
, ++j
)
13188 if (!node
->simdclone
->args
[i
].vector_arg
)
13191 tree basetype
= TREE_TYPE (node
->simdclone
->args
[i
].orig_arg
);
13192 tree vectype
= TREE_TYPE (node
->simdclone
->args
[i
].vector_arg
);
13193 adjustments
[j
].new_decl
13194 = build4 (ARRAY_REF
,
13196 node
->simdclone
->args
[i
].simd_array
,
13198 NULL_TREE
, NULL_TREE
);
/* An argument split over several narrow vectors occupies multiple
   adjustment slots; skip the extras.  */
13199 if (adjustments
[j
].op
== IPA_PARM_OP_NONE
13200 && TYPE_VECTOR_SUBPARTS (vectype
) < node
->simdclone
->simdlen
13201 j
+= node
->simdclone
->simdlen
/ TYPE_VECTOR_SUBPARTS (vectype
) - 1;
/* Retarget SSA names that still reference the old PARM_DECLs onto
   fresh VAR_DECL bases; default defs get an explicit initializing
   assignment at the start of the first real block.  */
13204 l
= adjustments
.length ();
13205 for (i
= 1; i
< num_ssa_names
; i
++)
13207 tree name
= ssa_name (i
);
13209 && SSA_NAME_VAR (name
)
13210 && TREE_CODE (SSA_NAME_VAR (name
)) == PARM_DECL
)
13212 for (j
= 0; j
< l
; j
++)
13213 if (SSA_NAME_VAR (name
) == adjustments
[j
].base
13214 && adjustments
[j
].new_decl
)
13217 if (adjustments
[j
].new_ssa_base
== NULL_TREE
)
13220 = copy_var_decl (adjustments
[j
].base
,
13221 DECL_NAME (adjustments
[j
].base
),
13222 TREE_TYPE (adjustments
[j
].base
));
13223 adjustments
[j
].new_ssa_base
= base_var
;
13226 base_var
= adjustments
[j
].new_ssa_base
;
13227 if (SSA_NAME_IS_DEFAULT_DEF (name
))
13229 bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
13230 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
13231 tree new_decl
= unshare_expr (adjustments
[j
].new_decl
);
13232 set_ssa_default_def (cfun
, adjustments
[j
].base
, NULL_TREE
);
13233 SET_SSA_NAME_VAR_OR_IDENTIFIER (name
, base_var
);
13234 SSA_NAME_IS_DEFAULT_DEF (name
) = 0;
13235 gimple stmt
= gimple_build_assign (name
, new_decl
);
13236 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
13239 SET_SSA_NAME_VAR_OR_IDENTIFIER (name
, base_var
);
/* Walk every statement, remapping operands through
   ipa_simd_modify_stmt_ops and rewriting returns into stores to
   RETVAL_ARRAY[ITER].  */
13244 struct modify_stmt_info info
;
13245 info
.adjustments
= adjustments
;
13247 FOR_EACH_BB_FN (bb
, DECL_STRUCT_FUNCTION (node
->decl
))
13249 gimple_stmt_iterator gsi
;
13251 gsi
= gsi_start_bb (bb
);
13252 while (!gsi_end_p (gsi
))
13254 gimple stmt
= gsi_stmt (gsi
);
13256 struct walk_stmt_info wi
;
13258 memset (&wi
, 0, sizeof (wi
));
13259 info
.modified
= false;
13261 walk_gimple_op (stmt
, ipa_simd_modify_stmt_ops
, &wi
);
13263 if (greturn
*return_stmt
= dyn_cast
<greturn
*> (stmt
))
13265 tree retval
= gimple_return_retval (return_stmt
);
13268 gsi_remove (&gsi
, true);
13272 /* Replace `return foo' with `retval_array[iter] = foo'. */
13273 tree ref
= build4 (ARRAY_REF
, TREE_TYPE (retval
),
13274 retval_array
, iter
, NULL
, NULL
);
13275 stmt
= gimple_build_assign (ref
, retval
);
13276 gsi_replace (&gsi
, stmt
, true);
13277 info
.modified
= true;
/* Re-canonicalize modified statements and purge EH edges that the
   rewrite may have made dead.  */
13282 update_stmt (stmt
);
13283 if (maybe_clean_eh_stmt (stmt
))
13284 gimple_purge_dead_eh_edges (gimple_bb (stmt
));
13291 /* Adjust the argument types in NODE to their appropriate vector
13295 simd_clone_adjust (struct cgraph_node
*node
)
13297 push_cfun (DECL_STRUCT_FUNCTION (node
->decl
));
13299 targetm
.simd_clone
.adjust (node
);
13301 tree retval
= simd_clone_adjust_return_type (node
);
13302 ipa_parm_adjustment_vec adjustments
13303 = simd_clone_adjust_argument_types (node
);
13305 push_gimplify_context ();
13307 gimple_seq seq
= simd_clone_init_simd_arrays (node
, adjustments
);
13309 /* Adjust all uses of vector arguments accordingly. Adjust all
13310 return values accordingly. */
13311 tree iter
= create_tmp_var (unsigned_type_node
, "iter");
13312 tree iter1
= make_ssa_name (iter
);
13313 tree iter2
= make_ssa_name (iter
);
13314 ipa_simd_modify_function_body (node
, adjustments
, retval
, iter1
);
13316 /* Initialize the iteration variable. */
13317 basic_block entry_bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
13318 basic_block body_bb
= split_block_after_labels (entry_bb
)->dest
;
13319 gimple_stmt_iterator gsi
= gsi_after_labels (entry_bb
);
13320 /* Insert the SIMD array and iv initialization at function
13322 gsi_insert_seq_before (&gsi
, seq
, GSI_NEW_STMT
);
13324 pop_gimplify_context (NULL
);
13326 /* Create a new BB right before the original exit BB, to hold the
13327 iteration increment and the condition/branch. */
13328 basic_block orig_exit
= EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun
), 0)->src
;
13329 basic_block incr_bb
= create_empty_bb (orig_exit
);
13330 add_bb_to_loop (incr_bb
, body_bb
->loop_father
);
13331 /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
13332 flag. Set it now to be a FALLTHRU_EDGE. */
13333 gcc_assert (EDGE_COUNT (orig_exit
->succs
) == 1);
13334 EDGE_SUCC (orig_exit
, 0)->flags
|= EDGE_FALLTHRU
;
13335 for (unsigned i
= 0;
13336 i
< EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
); ++i
)
13338 edge e
= EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun
), i
);
13339 redirect_edge_succ (e
, incr_bb
);
13341 edge e
= make_edge (incr_bb
, EXIT_BLOCK_PTR_FOR_FN (cfun
), 0);
13342 e
->probability
= REG_BR_PROB_BASE
;
13343 gsi
= gsi_last_bb (incr_bb
);
13344 gimple g
= gimple_build_assign (iter2
, PLUS_EXPR
, iter1
,
13345 build_int_cst (unsigned_type_node
, 1));
13346 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
13348 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
13349 struct loop
*loop
= alloc_loop ();
13350 cfun
->has_force_vectorize_loops
= true;
13351 loop
->safelen
= node
->simdclone
->simdlen
;
13352 loop
->force_vectorize
= true;
13353 loop
->header
= body_bb
;
13355 /* Branch around the body if the mask applies. */
13356 if (node
->simdclone
->inbranch
)
13358 gimple_stmt_iterator gsi
= gsi_last_bb (loop
->header
);
13360 = node
->simdclone
->args
[node
->simdclone
->nargs
- 1].simd_array
;
13361 tree mask
= make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array
)));
13362 tree aref
= build4 (ARRAY_REF
,
13363 TREE_TYPE (TREE_TYPE (mask_array
)),
13366 g
= gimple_build_assign (mask
, aref
);
13367 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
13368 int bitsize
= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref
)));
13369 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref
)))
13371 aref
= build1 (VIEW_CONVERT_EXPR
,
13372 build_nonstandard_integer_type (bitsize
, 0), mask
);
13373 mask
= make_ssa_name (TREE_TYPE (aref
));
13374 g
= gimple_build_assign (mask
, aref
);
13375 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
13378 g
= gimple_build_cond (EQ_EXPR
, mask
, build_zero_cst (TREE_TYPE (mask
)),
13380 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
13381 make_edge (loop
->header
, incr_bb
, EDGE_TRUE_VALUE
);
13382 FALLTHRU_EDGE (loop
->header
)->flags
= EDGE_FALSE_VALUE
;
13385 /* Generate the condition. */
13386 g
= gimple_build_cond (LT_EXPR
,
13388 build_int_cst (unsigned_type_node
,
13389 node
->simdclone
->simdlen
),
13391 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
13392 e
= split_block (incr_bb
, gsi_stmt (gsi
));
13393 basic_block latch_bb
= e
->dest
;
13394 basic_block new_exit_bb
;
13395 new_exit_bb
= split_block_after_labels (latch_bb
)->dest
;
13396 loop
->latch
= latch_bb
;
13398 redirect_edge_succ (FALLTHRU_EDGE (latch_bb
), body_bb
);
13400 make_edge (incr_bb
, new_exit_bb
, EDGE_FALSE_VALUE
);
13401 /* The successor of incr_bb is already pointing to latch_bb; just
13403 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
13404 FALLTHRU_EDGE (incr_bb
)->flags
= EDGE_TRUE_VALUE
;
13406 gphi
*phi
= create_phi_node (iter1
, body_bb
);
13407 edge preheader_edge
= find_edge (entry_bb
, body_bb
);
13408 edge latch_edge
= single_succ_edge (latch_bb
);
13409 add_phi_arg (phi
, build_zero_cst (unsigned_type_node
), preheader_edge
,
13411 add_phi_arg (phi
, iter2
, latch_edge
, UNKNOWN_LOCATION
);
13413 /* Generate the new return. */
13414 gsi
= gsi_last_bb (new_exit_bb
);
13416 && TREE_CODE (retval
) == VIEW_CONVERT_EXPR
13417 && TREE_CODE (TREE_OPERAND (retval
, 0)) == RESULT_DECL
)
13418 retval
= TREE_OPERAND (retval
, 0);
13421 retval
= build1 (VIEW_CONVERT_EXPR
,
13422 TREE_TYPE (TREE_TYPE (node
->decl
)),
13424 retval
= force_gimple_operand_gsi (&gsi
, retval
, true, NULL
,
13425 false, GSI_CONTINUE_LINKING
);
13427 g
= gimple_build_return (retval
);
13428 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
13430 /* Handle aligned clauses by replacing default defs of the aligned
13431 uniform args with __builtin_assume_aligned (arg_N(D), alignment)
13432 lhs. Handle linear by adding PHIs. */
13433 for (unsigned i
= 0; i
< node
->simdclone
->nargs
; i
++)
13434 if (node
->simdclone
->args
[i
].alignment
13435 && node
->simdclone
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
13436 && (node
->simdclone
->args
[i
].alignment
13437 & (node
->simdclone
->args
[i
].alignment
- 1)) == 0
13438 && TREE_CODE (TREE_TYPE (node
->simdclone
->args
[i
].orig_arg
))
13441 unsigned int alignment
= node
->simdclone
->args
[i
].alignment
;
13442 tree orig_arg
= node
->simdclone
->args
[i
].orig_arg
;
13443 tree def
= ssa_default_def (cfun
, orig_arg
);
13444 if (def
&& !has_zero_uses (def
))
13446 tree fn
= builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED
);
13447 gimple_seq seq
= NULL
;
13448 bool need_cvt
= false;
13450 = gimple_build_call (fn
, 2, def
, size_int (alignment
));
13452 if (!useless_type_conversion_p (TREE_TYPE (orig_arg
),
13455 tree t
= make_ssa_name (need_cvt
? ptr_type_node
: orig_arg
);
13456 gimple_call_set_lhs (g
, t
);
13457 gimple_seq_add_stmt_without_update (&seq
, g
);
13460 t
= make_ssa_name (orig_arg
);
13461 g
= gimple_build_assign (t
, NOP_EXPR
, gimple_call_lhs (g
));
13462 gimple_seq_add_stmt_without_update (&seq
, g
);
13464 gsi_insert_seq_on_edge_immediate
13465 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun
)), seq
);
13467 entry_bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
13468 int freq
= compute_call_stmt_bb_frequency (current_function_decl
,
13470 node
->create_edge (cgraph_node::get_create (fn
),
13471 call
, entry_bb
->count
, freq
);
13473 imm_use_iterator iter
;
13474 use_operand_p use_p
;
13476 tree repl
= gimple_get_lhs (g
);
13477 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, def
)
13478 if (is_gimple_debug (use_stmt
) || use_stmt
== call
)
13481 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
13482 SET_USE (use_p
, repl
);
13485 else if (node
->simdclone
->args
[i
].arg_type
13486 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
13488 tree orig_arg
= node
->simdclone
->args
[i
].orig_arg
;
13489 tree def
= ssa_default_def (cfun
, orig_arg
);
13490 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg
))
13491 || POINTER_TYPE_P (TREE_TYPE (orig_arg
)));
13492 if (def
&& !has_zero_uses (def
))
13494 iter1
= make_ssa_name (orig_arg
);
13495 iter2
= make_ssa_name (orig_arg
);
13496 phi
= create_phi_node (iter1
, body_bb
);
13497 add_phi_arg (phi
, def
, preheader_edge
, UNKNOWN_LOCATION
);
13498 add_phi_arg (phi
, iter2
, latch_edge
, UNKNOWN_LOCATION
);
13499 enum tree_code code
= INTEGRAL_TYPE_P (TREE_TYPE (orig_arg
))
13500 ? PLUS_EXPR
: POINTER_PLUS_EXPR
;
13501 tree addtype
= INTEGRAL_TYPE_P (TREE_TYPE (orig_arg
))
13502 ? TREE_TYPE (orig_arg
) : sizetype
;
13504 = build_int_cst (addtype
, node
->simdclone
->args
[i
].linear_step
);
13505 g
= gimple_build_assign (iter2
, code
, iter1
, addcst
);
13506 gsi
= gsi_last_bb (incr_bb
);
13507 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
13509 imm_use_iterator iter
;
13510 use_operand_p use_p
;
13512 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, def
)
13513 if (use_stmt
== phi
)
13516 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
13517 SET_USE (use_p
, iter1
);
13521 calculate_dominance_info (CDI_DOMINATORS
);
13522 add_loop (loop
, loop
->header
->loop_father
);
13523 update_ssa (TODO_update_ssa
);
13528 /* If the function in NODE is tagged as an elemental SIMD function,
13529 create the appropriate SIMD clones. */
13532 expand_simd_clones (struct cgraph_node
*node
)
13534 tree attr
= lookup_attribute ("omp declare simd",
13535 DECL_ATTRIBUTES (node
->decl
));
13536 if (attr
== NULL_TREE
13537 || node
->global
.inlined_to
13538 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node
->decl
)))
13542 #pragma omp declare simd
13544 in C, there we don't know the argument types at all. */
13545 if (!node
->definition
13546 && TYPE_ARG_TYPES (TREE_TYPE (node
->decl
)) == NULL_TREE
)
13551 /* Start with parsing the "omp declare simd" attribute(s). */
13552 bool inbranch_clause_specified
;
13553 struct cgraph_simd_clone
*clone_info
13554 = simd_clone_clauses_extract (node
, TREE_VALUE (attr
),
13555 &inbranch_clause_specified
);
13556 if (clone_info
== NULL
)
13559 int orig_simdlen
= clone_info
->simdlen
;
13560 tree base_type
= simd_clone_compute_base_data_type (node
, clone_info
);
13561 /* The target can return 0 (no simd clones should be created),
13562 1 (just one ISA of simd clones should be created) or higher
13563 count of ISA variants. In that case, clone_info is initialized
13564 for the first ISA variant. */
13566 = targetm
.simd_clone
.compute_vecsize_and_simdlen (node
, clone_info
,
13571 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
13572 also create one inbranch and one !inbranch clone of it. */
13573 for (int i
= 0; i
< count
* 2; i
++)
13575 struct cgraph_simd_clone
*clone
= clone_info
;
13576 if (inbranch_clause_specified
&& (i
& 1) != 0)
13581 clone
= simd_clone_struct_alloc (clone_info
->nargs
13583 simd_clone_struct_copy (clone
, clone_info
);
13584 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
13585 and simd_clone_adjust_argument_types did to the first
13587 clone
->nargs
-= clone_info
->inbranch
;
13588 clone
->simdlen
= orig_simdlen
;
13589 /* And call the target hook again to get the right ISA. */
13590 targetm
.simd_clone
.compute_vecsize_and_simdlen (node
, clone
,
13594 clone
->inbranch
= 1;
13597 /* simd_clone_mangle might fail if such a clone has been created
13599 tree id
= simd_clone_mangle (node
, clone
);
13600 if (id
== NULL_TREE
)
13603 /* Only when we are sure we want to create the clone actually
13604 clone the function (or definitions) or create another
13605 extern FUNCTION_DECL (for prototypes without definitions). */
13606 struct cgraph_node
*n
= simd_clone_create (node
);
13610 n
->simdclone
= clone
;
13611 clone
->origin
= node
;
13612 clone
->next_clone
= NULL
;
13613 if (node
->simd_clones
== NULL
)
13615 clone
->prev_clone
= n
;
13616 node
->simd_clones
= n
;
13620 clone
->prev_clone
= node
->simd_clones
->simdclone
->prev_clone
;
13621 clone
->prev_clone
->simdclone
->next_clone
= n
;
13622 node
->simd_clones
->simdclone
->prev_clone
= n
;
13624 symtab
->change_decl_assembler_name (n
->decl
, id
);
13625 /* And finally adjust the return type, parameters and for
13626 definitions also function body. */
13627 if (node
->definition
)
13628 simd_clone_adjust (n
);
13631 simd_clone_adjust_return_type (n
);
13632 simd_clone_adjust_argument_types (n
);
13636 while ((attr
= lookup_attribute ("omp declare simd", TREE_CHAIN (attr
))));
13639 /* Entry point for IPA simd clone creation pass. */
13641 static unsigned int
13642 ipa_omp_simd_clone (void)
13644 struct cgraph_node
*node
;
13645 FOR_EACH_FUNCTION (node
)
13646 expand_simd_clones (node
);
13652 const pass_data pass_data_omp_simd_clone
=
13654 SIMPLE_IPA_PASS
, /* type */
13655 "simdclone", /* name */
13656 OPTGROUP_NONE
, /* optinfo_flags */
13657 TV_NONE
, /* tv_id */
13658 ( PROP_ssa
| PROP_cfg
), /* properties_required */
13659 0, /* properties_provided */
13660 0, /* properties_destroyed */
13661 0, /* todo_flags_start */
13662 0, /* todo_flags_finish */
13665 class pass_omp_simd_clone
: public simple_ipa_opt_pass
13668 pass_omp_simd_clone(gcc::context
*ctxt
)
13669 : simple_ipa_opt_pass(pass_data_omp_simd_clone
, ctxt
)
13672 /* opt_pass methods: */
13673 virtual bool gate (function
*);
13674 virtual unsigned int execute (function
*) { return ipa_omp_simd_clone (); }
13678 pass_omp_simd_clone::gate (function
*)
13680 return ((flag_openmp
|| flag_openmp_simd
13682 || (in_lto_p
&& !flag_wpa
))
13683 && (targetm
.simd_clone
.compute_vecsize_and_simdlen
!= NULL
));
13686 } // anon namespace
13688 simple_ipa_opt_pass
*
13689 make_pass_omp_simd_clone (gcc::context
*ctxt
)
13691 return new pass_omp_simd_clone (ctxt
);
13694 /* Helper function for omp_finish_file routine. Takes decls from V_DECLS and
13695 adds their addresses and sizes to constructor-vector V_CTOR. */
13697 add_decls_addresses_to_decl_constructor (vec
<tree
, va_gc
> *v_decls
,
13698 vec
<constructor_elt
, va_gc
> *v_ctor
)
13700 unsigned len
= vec_safe_length (v_decls
);
13701 for (unsigned i
= 0; i
< len
; i
++)
13703 tree it
= (*v_decls
)[i
];
13704 bool is_function
= TREE_CODE (it
) != VAR_DECL
;
13706 CONSTRUCTOR_APPEND_ELT (v_ctor
, NULL_TREE
, build_fold_addr_expr (it
));
13708 CONSTRUCTOR_APPEND_ELT (v_ctor
, NULL_TREE
,
13709 fold_convert (const_ptr_type_node
,
13710 DECL_SIZE_UNIT (it
)));
13714 /* Create new symbols containing (address, size) pairs for global variables,
13715 marked with "omp declare target" attribute, as well as addresses for the
13716 functions, which are outlined offloading regions. */
13718 omp_finish_file (void)
13720 unsigned num_funcs
= vec_safe_length (offload_funcs
);
13721 unsigned num_vars
= vec_safe_length (offload_vars
);
13723 if (num_funcs
== 0 && num_vars
== 0)
13726 if (targetm_common
.have_named_sections
)
13728 vec
<constructor_elt
, va_gc
> *v_f
, *v_v
;
13729 vec_alloc (v_f
, num_funcs
);
13730 vec_alloc (v_v
, num_vars
* 2);
13732 add_decls_addresses_to_decl_constructor (offload_funcs
, v_f
);
13733 add_decls_addresses_to_decl_constructor (offload_vars
, v_v
);
13735 tree vars_decl_type
= build_array_type_nelts (pointer_sized_int_node
,
13737 tree funcs_decl_type
= build_array_type_nelts (pointer_sized_int_node
,
13739 TYPE_ALIGN (vars_decl_type
) = TYPE_ALIGN (pointer_sized_int_node
);
13740 TYPE_ALIGN (funcs_decl_type
) = TYPE_ALIGN (pointer_sized_int_node
);
13741 tree ctor_v
= build_constructor (vars_decl_type
, v_v
);
13742 tree ctor_f
= build_constructor (funcs_decl_type
, v_f
);
13743 TREE_CONSTANT (ctor_v
) = TREE_CONSTANT (ctor_f
) = 1;
13744 TREE_STATIC (ctor_v
) = TREE_STATIC (ctor_f
) = 1;
13745 tree funcs_decl
= build_decl (UNKNOWN_LOCATION
, VAR_DECL
,
13746 get_identifier (".offload_func_table"),
13748 tree vars_decl
= build_decl (UNKNOWN_LOCATION
, VAR_DECL
,
13749 get_identifier (".offload_var_table"),
13751 TREE_STATIC (funcs_decl
) = TREE_STATIC (vars_decl
) = 1;
13752 /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
13753 otherwise a joint table in a binary will contain padding between
13754 tables from multiple object files. */
13755 DECL_USER_ALIGN (funcs_decl
) = DECL_USER_ALIGN (vars_decl
) = 1;
13756 DECL_ALIGN (funcs_decl
) = TYPE_ALIGN (funcs_decl_type
);
13757 DECL_ALIGN (vars_decl
) = TYPE_ALIGN (vars_decl_type
);
13758 DECL_INITIAL (funcs_decl
) = ctor_f
;
13759 DECL_INITIAL (vars_decl
) = ctor_v
;
13760 set_decl_section_name (funcs_decl
, OFFLOAD_FUNC_TABLE_SECTION_NAME
);
13761 set_decl_section_name (vars_decl
, OFFLOAD_VAR_TABLE_SECTION_NAME
);
13763 varpool_node::finalize_decl (vars_decl
);
13764 varpool_node::finalize_decl (funcs_decl
);
13768 for (unsigned i
= 0; i
< num_funcs
; i
++)
13770 tree it
= (*offload_funcs
)[i
];
13771 targetm
.record_offload_symbol (it
);
13773 for (unsigned i
= 0; i
< num_vars
; i
++)
13775 tree it
= (*offload_vars
)[i
];
13776 targetm
.record_offload_symbol (it
);
13781 #include "gt-omp-low.h"