/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "pointer-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "splay-tree.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "tree-nested.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
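
/* Illustrative sketch (not part of the original sources): for user code
   such as

       #pragma omp parallel shared (a)
       a[omp_get_thread_num ()] = 1;

   the two phases above, followed by pass_expand_omp, produce roughly

       struct .omp_data_s { int *a; } .omp_data_o;
       .omp_data_o.a = a;
       __builtin_GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   with the statement body outlined into the new child function
   foo._omp_fn.0, which accesses "a" through its .omp_data_i argument.
   The exact record layout and call form depend on the clauses used.  */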
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     directive.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;
  for (t = gimple_omp_for_clauses (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD);
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}
      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}
      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }
  if (count
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
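
/* Worked example (for reference, not in the original sources): with
   constant bounds the count computed above is
   (step + sign + n2 - n1) / step, where sign is -1 for < loops and
   1 for > loops.  E.g. for

       for (i = 3; i < 10; i += 2)

   this gives (2 + -1 + 10 - 3) / 2 = 4, matching the four iterations
   i = 3, 5, 7, 9.  */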
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   parallel+workshare call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
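
/* Example (illustrative only): for a plain "int x" shared by a
   non-nested parallel, not addressable and not read-only, this returns
   false and x is copied in and out of the .omp_data_s record by value;
   for an aggregate such as "int a[10]" or a static variable it returns
   true and only the address is placed in the record.  */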
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
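
/* Illustrative result (not from the original sources): for a field "i"
   this builds .omp_data_i->i, or *(.omp_data_i->i) when BY_REF is set,
   where .omp_data_i is the child function's receiver parameter.  */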
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference even
	 private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
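
/* For reference (summarizing the code above, no new semantics): bit 0
   of MASK requests a field in record_type (receiver side), bit 1 a
   field in srecord_type (sender side for tasks), and bit 2 an extra
   level of indirection used for pointer-based array sections, as in
   the install_var_field (decl, true, 7, ctx) call made for mapped
   array pointers below.  */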
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_add_new_function (child_fn, false);
}
/* Destroy a omp_context data structures.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		  decl = OMP_CLAUSE_DECL (c);
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;
	}
    }
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_FROM:
	  break;
	}
    }
  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for omp child function.  Returns an identifier.  */

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
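
/* E.g. (illustrative): lowering a function "foo" yields child functions
   named foo._omp_fn.0, foo._omp_fn.1, ... and foo._omp_cpyfn.N for task
   copy functions, via clone_function_name's numbered suffixes.  */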
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  bool target_p = false;
  if (lookup_attribute ("omp declare target",
			DECL_ATTRIBUTES (current_function_decl)))
    target_p = true;
  else
    {
      omp_context *octx;
      for (octx = ctx; octx; octx = octx->outer)
	if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
	    && gimple_omp_target_kind (octx->stmt)
	       == GF_OMP_TARGET_KIND_REGION)
	  {
	    target_p = true;
	    break;
	  }
    }
  if (target_p)
    DECL_ATTRIBUTES (decl)
      = tree_cons (get_identifier ("omp declare target"),
		   NULL_TREE, DECL_ATTRIBUTES (decl));

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR.  */

static tree
find_combined_for (gimple_stmt_iterator *gsi_p,
		   bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  if (gimple_omp_parallel_combined_p (stmt))
    {
      gimple for_stmt;
      struct walk_stmt_info wi;

      memset (&wi, 0, sizeof (wi));
      walk_gimple_seq (gimple_omp_body (stmt),
		       find_combined_for, NULL, &wi);
      for_stmt = (gimple) wi.info;
      if (for_stmt)
	{
	  struct omp_for_data fd;
	  extract_omp_for_data (for_stmt, &fd, NULL);
	  /* We need two temporaries with fd.loop.v type (istart/iend)
	     and then (fd.collapse - 1) temporaries with the same
	     type for count2 ... countN-1 vars if not constant.  */
	  size_t count = 2, i;
	  tree type = fd.iter_type;
	  if (fd.collapse > 1
	      && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	    count += fd.collapse - 1;
	  for (i = 0; i < count; i++)
	    {
	      tree temp = create_tmp_var (type, NULL);
	      tree c = build_omp_clause (UNKNOWN_LOCATION,
					 OMP_CLAUSE__LOOPTEMP_);
	      insert_decl_map (&outer_ctx->cb, temp, temp);
	      OMP_CLAUSE_DECL (c) = temp;
	      OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
	      gimple_omp_parallel_set_clauses (stmt, c);
	    }
	}
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
2054 /* Scan an OpenMP task directive. */
2057 scan_omp_task (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
2061 gimple stmt
= gsi_stmt (*gsi
);
2062 location_t loc
= gimple_location (stmt
);
2064 /* Ignore task directives with empty bodies. */
2066 && empty_body_p (gimple_omp_body (stmt
)))
2068 gsi_replace (gsi
, gimple_build_nop (), false);
  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Scan an OpenMP target{, data, update} directive.  */

static void
scan_omp_target (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  int kind = gimple_omp_target_kind (stmt);

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_t");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  if (kind == GF_OMP_TARGET_KIND_REGION)
    {
      create_omp_child_function (ctx, false);
      gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
    }

  scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      TYPE_FIELDS (ctx->record_type)
	= nreverse (TYPE_FIELDS (ctx->record_type));
#ifdef ENABLE_CHECKING
      tree field;
      unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
      for (field = TYPE_FIELDS (ctx->record_type);
	   field;
	   field = DECL_CHAIN (field))
	gcc_assert (DECL_ALIGN (field) == align);
#endif
      layout_type (ctx->record_type);
      if (kind == GF_OMP_TARGET_KIND_REGION)
	fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP teams directive.  */

static void
scan_omp_teams (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Check OpenMP nesting restrictions.  */

static bool
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  if (ctx != NULL)
    {
      if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	  && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	{
	  error_at (gimple_location (stmt),
		    "OpenMP constructs may not be nested inside simd region");
	  return false;
	}
      else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	{
	  if ((gimple_code (stmt) != GIMPLE_OMP_FOR
	       || (gimple_omp_for_kind (stmt)
		   != GF_OMP_FOR_KIND_DISTRIBUTE))
	      && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
	    {
	      error_at (gimple_location (stmt),
			"only distribute or parallel constructs are allowed to "
			"be closely nested inside teams construct");
	      return false;
	    }
	}
    }
  switch (gimple_code (stmt))
    {
2288 case GIMPLE_OMP_FOR
:
2289 if (gimple_omp_for_kind (stmt
) & GF_OMP_FOR_SIMD
)
2291 if (gimple_omp_for_kind (stmt
) == GF_OMP_FOR_KIND_DISTRIBUTE
)
2293 if (ctx
!= NULL
&& gimple_code (ctx
->stmt
) != GIMPLE_OMP_TEAMS
)
2295 error_at (gimple_location (stmt
),
2296 "distribute construct must be closely nested inside "
2304 if (is_gimple_call (stmt
)
2305 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2306 == BUILT_IN_GOMP_CANCEL
2307 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2308 == BUILT_IN_GOMP_CANCELLATION_POINT
))
2310 const char *bad
= NULL
;
2311 const char *kind
= NULL
;
2314 error_at (gimple_location (stmt
), "orphaned %qs construct",
2315 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2316 == BUILT_IN_GOMP_CANCEL
2317 ? "#pragma omp cancel"
2318 : "#pragma omp cancellation point");
2321 switch (tree_fits_shwi_p (gimple_call_arg (stmt
, 0))
2322 ? tree_to_shwi (gimple_call_arg (stmt
, 0))
2326 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_PARALLEL
)
2327 bad
= "#pragma omp parallel";
2328 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2329 == BUILT_IN_GOMP_CANCEL
2330 && !integer_zerop (gimple_call_arg (stmt
, 1)))
2331 ctx
->cancellable
= true;
2335 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_FOR
2336 || gimple_omp_for_kind (ctx
->stmt
) != GF_OMP_FOR_KIND_FOR
)
2337 bad
= "#pragma omp for";
2338 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2339 == BUILT_IN_GOMP_CANCEL
2340 && !integer_zerop (gimple_call_arg (stmt
, 1)))
2342 ctx
->cancellable
= true;
2343 if (find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2345 warning_at (gimple_location (stmt
), 0,
2346 "%<#pragma omp cancel for%> inside "
2347 "%<nowait%> for construct");
2348 if (find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2349 OMP_CLAUSE_ORDERED
))
2350 warning_at (gimple_location (stmt
), 0,
2351 "%<#pragma omp cancel for%> inside "
2352 "%<ordered%> for construct");
2357 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_SECTIONS
2358 && gimple_code (ctx
->stmt
) != GIMPLE_OMP_SECTION
)
2359 bad
= "#pragma omp sections";
2360 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2361 == BUILT_IN_GOMP_CANCEL
2362 && !integer_zerop (gimple_call_arg (stmt
, 1)))
2364 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_SECTIONS
)
2366 ctx
->cancellable
= true;
2367 if (find_omp_clause (gimple_omp_sections_clauses
2370 warning_at (gimple_location (stmt
), 0,
2371 "%<#pragma omp cancel sections%> inside "
2372 "%<nowait%> sections construct");
2376 gcc_assert (ctx
->outer
2377 && gimple_code (ctx
->outer
->stmt
)
2378 == GIMPLE_OMP_SECTIONS
);
2379 ctx
->outer
->cancellable
= true;
2380 if (find_omp_clause (gimple_omp_sections_clauses
2383 warning_at (gimple_location (stmt
), 0,
2384 "%<#pragma omp cancel sections%> inside "
2385 "%<nowait%> sections construct");
2391 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_TASK
)
2392 bad
= "#pragma omp task";
2394 ctx
->cancellable
= true;
2398 error_at (gimple_location (stmt
), "invalid arguments");
2403 error_at (gimple_location (stmt
),
2404 "%<%s %s%> construct not closely nested inside of %qs",
2405 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2406 == BUILT_IN_GOMP_CANCEL
2407 ? "#pragma omp cancel"
2408 : "#pragma omp cancellation point", kind
, bad
);
2413 case GIMPLE_OMP_SECTIONS
:
2414 case GIMPLE_OMP_SINGLE
:
2415 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2416 switch (gimple_code (ctx
->stmt
))
2418 case GIMPLE_OMP_FOR
:
2419 case GIMPLE_OMP_SECTIONS
:
2420 case GIMPLE_OMP_SINGLE
:
2421 case GIMPLE_OMP_ORDERED
:
2422 case GIMPLE_OMP_MASTER
:
2423 case GIMPLE_OMP_TASK
:
2424 case GIMPLE_OMP_CRITICAL
:
2425 if (is_gimple_call (stmt
))
2427 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt
))
2428 != BUILT_IN_GOMP_BARRIER
)
2430 error_at (gimple_location (stmt
),
2431 "barrier region may not be closely nested inside "
2432 "of work-sharing, critical, ordered, master or "
2433 "explicit task region");
2436 error_at (gimple_location (stmt
),
2437 "work-sharing region may not be closely nested inside "
2438 "of work-sharing, critical, ordered, master or explicit "
2441 case GIMPLE_OMP_PARALLEL
:
2447 case GIMPLE_OMP_MASTER
:
2448 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2449 switch (gimple_code (ctx
->stmt
))
2451 case GIMPLE_OMP_FOR
:
2452 case GIMPLE_OMP_SECTIONS
:
2453 case GIMPLE_OMP_SINGLE
:
2454 case GIMPLE_OMP_TASK
:
2455 error_at (gimple_location (stmt
),
2456 "master region may not be closely nested inside "
2457 "of work-sharing or explicit task region");
2459 case GIMPLE_OMP_PARALLEL
:
2465 case GIMPLE_OMP_ORDERED
:
2466 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2467 switch (gimple_code (ctx
->stmt
))
2469 case GIMPLE_OMP_CRITICAL
:
2470 case GIMPLE_OMP_TASK
:
2471 error_at (gimple_location (stmt
),
2472 "ordered region may not be closely nested inside "
2473 "of critical or explicit task region");
2475 case GIMPLE_OMP_FOR
:
2476 if (find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2477 OMP_CLAUSE_ORDERED
) == NULL
)
2479 error_at (gimple_location (stmt
),
2480 "ordered region must be closely nested inside "
2481 "a loop region with an ordered clause");
2485 case GIMPLE_OMP_PARALLEL
:
2486 error_at (gimple_location (stmt
),
2487 "ordered region must be closely nested inside "
2488 "a loop region with an ordered clause");
2494 case GIMPLE_OMP_CRITICAL
:
2495 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2496 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_CRITICAL
2497 && (gimple_omp_critical_name (stmt
)
2498 == gimple_omp_critical_name (ctx
->stmt
)))
2500 error_at (gimple_location (stmt
),
2501 "critical region may not be nested inside a critical "
2502 "region with the same name");
2506 case GIMPLE_OMP_TEAMS
:
2508 || gimple_code (ctx
->stmt
) != GIMPLE_OMP_TARGET
2509 || gimple_omp_target_kind (ctx
->stmt
) != GF_OMP_TARGET_KIND_REGION
)
2511 error_at (gimple_location (stmt
),
2512 "teams construct not closely nested inside of target "
2517 case GIMPLE_OMP_TARGET
:
2518 for (; ctx
!= NULL
; ctx
= ctx
->outer
)
2519 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TARGET
2520 && gimple_omp_target_kind (ctx
->stmt
) == GF_OMP_TARGET_KIND_REGION
)
2523 switch (gimple_omp_target_kind (stmt
))
2525 case GF_OMP_TARGET_KIND_REGION
: name
= "target"; break;
2526 case GF_OMP_TARGET_KIND_DATA
: name
= "target data"; break;
2527 case GF_OMP_TARGET_KIND_UPDATE
: name
= "target update"; break;
2528 default: gcc_unreachable ();
2530 warning_at (gimple_location (stmt
), 0,
2531 "%s construct inside of target region", name
);
/* Helper function of scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = wide_int_to_tree (tem, t);
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}
/* Return true if FNDECL is a setjmp or a longjmp.  */

static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
	  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
    return true;

  tree declname = DECL_NAME (fndecl);
  if (!declname)
    return false;
  const char *name = IDENTIFIER_POINTER (declname);
  return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  bool remove = false;
  if (is_gimple_omp (stmt))
    remove = !check_omp_nesting_restrictions (stmt, ctx);
  else if (is_gimple_call (stmt))
    {
      tree fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  if (setjmp_or_longjmp_p (fndecl)
	      && ctx
	      && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	    {
	      remove = true;
	      error_at (gimple_location (stmt),
			"setjmp/longjmp inside simd construct");
	    }
	  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (fndecl))
	      {
	      case BUILT_IN_GOMP_BARRIER:
	      case BUILT_IN_GOMP_CANCEL:
	      case BUILT_IN_GOMP_CANCELLATION_POINT:
	      case BUILT_IN_GOMP_TASKYIELD:
	      case BUILT_IN_GOMP_TASKWAIT:
	      case BUILT_IN_GOMP_TASKGROUP_START:
	      case BUILT_IN_GOMP_TASKGROUP_END:
		remove = !check_omp_nesting_restrictions (stmt, ctx);
		break;
	      default:
		break;
	      }
	}
    }
  if (remove)
    {
      stmt = gimple_build_nop ();
      gsi_replace (gsi, stmt, false);
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_OMP_TARGET:
      scan_omp_target (stmt, ctx);
      break;

    case GIMPLE_OMP_TEAMS:
      scan_omp_teams (stmt, ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;

    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static gimple
build_omp_barrier (tree lhs)
{
  tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
					   : BUILT_IN_GOMP_BARRIER);
  gimple g = gimple_build_call (fndecl, 0);
  if (lhs)
    gimple_call_set_lhs (g, lhs);
  return g;
}
/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.  The symbol iD.1562 cannot be referenced
	       inside the body of the outer parallel region.  But since we
	       are emitting this copy operation while expanding the inner
	       parallel directive, we need to access the CTX structure of
	       the outer parallel directive to get the correct mapping:

		  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
/* Return alignment to be assumed for var in CLAUSE, which should be
   OMP_CLAUSE_ALIGNED.  */

static tree
omp_clause_aligned_alignment (tree clause)
{
  if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
    return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);

  /* Otherwise return implementation defined alignment.  */
  unsigned int al = 1;
  enum machine_mode mode, vmode;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    vs = 1 << floor_log2 (vs);
  static enum mode_class classes[]
    = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
  for (int i = 0; i < 4; i += 2)
    for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
	 mode != VOIDmode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	vmode = targetm.vectorize.preferred_simd_mode (mode);
	if (GET_MODE_CLASS (vmode) != classes[i + 1])
	  continue;
	while (vs
	       && GET_MODE_SIZE (vmode) < vs
	       && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
	  vmode = GET_MODE_2XWIDER_MODE (vmode);

	tree type = lang_hooks.types.type_for_mode (mode, 1);
	if (type == NULL_TREE || TYPE_MODE (type) != mode)
	  continue;
	type = build_vector_type (type, GET_MODE_SIZE (vmode)
					/ GET_MODE_SIZE (mode));
	if (TYPE_MODE (type) != vmode)
	  continue;
	if (TYPE_ALIGN_UNIT (type) > al)
	  al = TYPE_ALIGN_UNIT (type);
      }
  return build_int_cst (integer_type_node, al);
}
/* Return maximum possible vectorization factor for the target.  */

static int
omp_max_vf (void)
{
  if (!optimize
      || optimize_debug
      || !flag_tree_loop_optimize
      || (!flag_tree_loop_vectorize
	  && (global_options_set.x_flag_tree_loop_vectorize
	      || global_options_set.x_flag_tree_vectorize)))
    return 1;

  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    {
      vs = 1 << floor_log2 (vs);
      return vs;
    }
  enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
  if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
    return GET_MODE_NUNITS (vqimode);
  return 1;
}
/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
   privatization.  */

static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
			      tree &idx, tree &lane, tree &ivar, tree &lvar)
{
  if (max_vf == 0)
    {
      max_vf = omp_max_vf ();
      if (max_vf > 1)
	{
	  tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				    OMP_CLAUSE_SAFELEN);
	  if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
	    max_vf = 1;
	  else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
					  max_vf) == -1)
	    max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
	}
      if (max_vf > 1)
	{
	  idx = create_tmp_var (unsigned_type_node, NULL);
	  lane = create_tmp_var (unsigned_type_node, NULL);
	}
    }
  if (max_vf == 1)
    return false;

  tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
  tree avar = create_tmp_var_raw (atype, NULL);
  if (TREE_ADDRESSABLE (new_var))
    TREE_ADDRESSABLE (avar) = 1;
  DECL_ATTRIBUTES (avar)
    = tree_cons (get_identifier ("omp simd array"), NULL,
		 DECL_ATTRIBUTES (avar));
  gimple_add_tmp_var (avar);
  ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
		 NULL_TREE, NULL_TREE);
  lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
		 NULL_TREE, NULL_TREE);
  if (DECL_P (new_var))
    {
      SET_DECL_VALUE_EXPR (new_var, lvar);
      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
    }
  return true;
}
/* Helper function of lower_rec_input_clauses.  For a reference
   in simd reduction, add an underlying variable it will reference.  */

static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
  tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
  if (TREE_CONSTANT (z))
    {
      const char *name = NULL;
      if (DECL_NAME (new_vard))
	name = IDENTIFIER_POINTER (DECL_NAME (new_vard));

      z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
      gimple_add_tmp_var (z);
      TREE_ADDRESSABLE (z) = 1;
      z = build_fold_addr_expr_loc (loc, z);
      gimplify_assign (new_vard, z, ilist);
    }
}
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx, struct omp_for_data *fd)
{
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  bool reduction_omp_orig_ref = false;
  int pass;
  bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
  int max_vf = 0;
  tree lane = NULL_TREE, idx = NULL_TREE;
  tree ivar = NULL_TREE, lvar = NULL_TREE;
  gimple_seq llist[2] = { NULL, NULL };

  copyin_seq = NULL;

  /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
     with data sharing clauses referencing variable sized vars.  That
     is unnecessarily hard to support and very unlikely to result in
     vectorized code anyway.  */
  if (is_simd)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LINEAR:
	  if (OMP_CLAUSE_LINEAR_ARRAY (c))
	    max_vf = 1;
	  /* FALLTHRU */
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LASTPRIVATE:
	  if (is_variable_sized (OMP_CLAUSE_DECL (c)))
	    max_vf = 1;
	  break;
	default:
	  continue;
	}

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
3116 case OMP_CLAUSE_PRIVATE
:
3117 if (OMP_CLAUSE_PRIVATE_DEBUG (c
))
3120 case OMP_CLAUSE_SHARED
:
3121 /* Ignore shared directives in teams construct. */
3122 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
3124 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c
), ctx
) == NULL
)
3126 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c
)));
3129 case OMP_CLAUSE_FIRSTPRIVATE
:
3130 case OMP_CLAUSE_COPYIN
:
3131 case OMP_CLAUSE_LINEAR
:
3133 case OMP_CLAUSE_REDUCTION
:
3134 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c
))
3135 reduction_omp_orig_ref
= true;
3137 case OMP_CLAUSE__LOOPTEMP_
:
3138 /* Handle _looptemp_ clauses only on parallel. */
3142 case OMP_CLAUSE_LASTPRIVATE
:
3143 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
3145 lastprivate_firstprivate
= true;
3149 /* Even without corresponding firstprivate, if
3150 decl is Fortran allocatable, it needs outer var
3153 && lang_hooks
.decls
.omp_private_outer_ref
3154 (OMP_CLAUSE_DECL (c
)))
3155 lastprivate_firstprivate
= true;
3157 case OMP_CLAUSE_ALIGNED
:
3160 var
= OMP_CLAUSE_DECL (c
);
3161 if (TREE_CODE (TREE_TYPE (var
)) == POINTER_TYPE
3162 && !is_global_var (var
))
3164 new_var
= maybe_lookup_decl (var
, ctx
);
3165 if (new_var
== NULL_TREE
)
3166 new_var
= maybe_lookup_decl_in_outer_ctx (var
, ctx
);
3167 x
= builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED
);
3168 x
= build_call_expr_loc (clause_loc
, x
, 2, new_var
,
3169 omp_clause_aligned_alignment (c
));
3170 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
3171 x
= build2 (MODIFY_EXPR
, TREE_TYPE (new_var
), new_var
, x
);
3172 gimplify_and_add (x
, ilist
);
3174 else if (TREE_CODE (TREE_TYPE (var
)) == ARRAY_TYPE
3175 && is_global_var (var
))
3177 tree ptype
= build_pointer_type (TREE_TYPE (var
)), t
, t2
;
3178 new_var
= lookup_decl (var
, ctx
);
3179 t
= maybe_lookup_decl_in_outer_ctx (var
, ctx
);
3180 t
= build_fold_addr_expr_loc (clause_loc
, t
);
3181 t2
= builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED
);
3182 t
= build_call_expr_loc (clause_loc
, t2
, 2, t
,
3183 omp_clause_aligned_alignment (c
));
3184 t
= fold_convert_loc (clause_loc
, ptype
, t
);
3185 x
= create_tmp_var (ptype
, NULL
);
3186 t
= build2 (MODIFY_EXPR
, ptype
, x
, t
);
3187 gimplify_and_add (t
, ilist
);
3188 t
= build_simple_mem_ref_loc (clause_loc
, x
);
3189 SET_DECL_VALUE_EXPR (new_var
, t
);
3190 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3197 new_var
= var
= OMP_CLAUSE_DECL (c
);
3198 if (c_kind
!= OMP_CLAUSE_COPYIN
)
3199 new_var
= lookup_decl (var
, ctx
);
3201 if (c_kind
== OMP_CLAUSE_SHARED
|| c_kind
== OMP_CLAUSE_COPYIN
)
3206 else if (is_variable_sized (var
))
3208 /* For variable sized types, we need to allocate the
3209 actual storage here. Call alloca and store the
3210 result in the pointer decl that we created elsewhere. */
3214 if (c_kind
!= OMP_CLAUSE_FIRSTPRIVATE
|| !is_task_ctx (ctx
))
3219 ptr
= DECL_VALUE_EXPR (new_var
);
3220 gcc_assert (TREE_CODE (ptr
) == INDIRECT_REF
);
3221 ptr
= TREE_OPERAND (ptr
, 0);
3222 gcc_assert (DECL_P (ptr
));
3223 x
= TYPE_SIZE_UNIT (TREE_TYPE (new_var
));
3225 /* void *tmp = __builtin_alloca */
3226 atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
3227 stmt
= gimple_build_call (atmp
, 1, x
);
3228 tmp
= create_tmp_var_raw (ptr_type_node
, NULL
);
3229 gimple_add_tmp_var (tmp
);
3230 gimple_call_set_lhs (stmt
, tmp
);
3232 gimple_seq_add_stmt (ilist
, stmt
);
3234 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ptr
), tmp
);
3235 gimplify_assign (ptr
, x
, ilist
);
3238 else if (is_reference (var
))
3240 /* For references that are being privatized for Fortran,
3241 allocate new backing storage for the new pointer
3242 variable. This allows us to avoid changing all the
3243 code that expects a pointer to something that expects
3244 a direct variable. */
3248 x
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var
)));
3249 if (c_kind
== OMP_CLAUSE_FIRSTPRIVATE
&& is_task_ctx (ctx
))
3251 x
= build_receiver_ref (var
, false, ctx
);
3252 x
= build_fold_addr_expr_loc (clause_loc
, x
);
3254 else if (TREE_CONSTANT (x
))
3256 /* For reduction in SIMD loop, defer adding the
3257 initialization of the reference, because if we decide
3258 to use SIMD array for it, the initilization could cause
3260 if (c_kind
== OMP_CLAUSE_REDUCTION
&& is_simd
)
3264 const char *name
= NULL
;
3265 if (DECL_NAME (var
))
3266 name
= IDENTIFIER_POINTER (DECL_NAME (new_var
));
3268 x
= create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var
)),
3270 gimple_add_tmp_var (x
);
3271 TREE_ADDRESSABLE (x
) = 1;
3272 x
= build_fold_addr_expr_loc (clause_loc
, x
);
3277 tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
3278 x
= build_call_expr_loc (clause_loc
, atmp
, 1, x
);
3283 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
3284 gimplify_assign (new_var
, x
, ilist
);
3287 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
3289 else if (c_kind
== OMP_CLAUSE_REDUCTION
3290 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3298 switch (OMP_CLAUSE_CODE (c
))
3300 case OMP_CLAUSE_SHARED
:
3301 /* Ignore shared directives in teams construct. */
3302 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_TEAMS
)
3304 /* Shared global vars are just accessed directly. */
3305 if (is_global_var (new_var
))
3307 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3308 needs to be delayed until after fixup_child_record_type so
3309 that we get the correct type during the dereference. */
3310 by_ref
= use_pointer_for_field (var
, ctx
);
3311 x
= build_receiver_ref (var
, by_ref
, ctx
);
3312 SET_DECL_VALUE_EXPR (new_var
, x
);
3313 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3315 /* ??? If VAR is not passed by reference, and the variable
3316 hasn't been initialized yet, then we'll get a warning for
3317 the store into the omp_data_s structure. Ideally, we'd be
3318 able to notice this and not store anything at all, but
3319 we're generating code too early. Suppress the warning. */
3321 TREE_NO_WARNING (var
) = 1;
3324 case OMP_CLAUSE_LASTPRIVATE
:
3325 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
3329 case OMP_CLAUSE_PRIVATE
:
3330 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_PRIVATE
)
3331 x
= build_outer_var_ref (var
, ctx
);
3332 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
3334 if (is_task_ctx (ctx
))
3335 x
= build_receiver_ref (var
, false, ctx
);
3337 x
= build_outer_var_ref (var
, ctx
);
3343 nx
= lang_hooks
.decls
.omp_clause_default_ctor (c
, new_var
, x
);
3346 tree y
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
3347 if ((TREE_ADDRESSABLE (new_var
) || nx
|| y
3348 || OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
)
3349 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
3350 idx
, lane
, ivar
, lvar
))
3353 x
= lang_hooks
.decls
.omp_clause_default_ctor
3354 (c
, unshare_expr (ivar
), x
);
3356 gimplify_and_add (x
, &llist
[0]);
3359 y
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
3362 gimple_seq tseq
= NULL
;
3365 gimplify_stmt (&dtor
, &tseq
);
3366 gimple_seq_add_seq (&llist
[1], tseq
);
3373 gimplify_and_add (nx
, ilist
);
3377 x
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
3380 gimple_seq tseq
= NULL
;
3383 gimplify_stmt (&dtor
, &tseq
);
3384 gimple_seq_add_seq (dlist
, tseq
);
3388 case OMP_CLAUSE_LINEAR
:
3389 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c
))
3390 goto do_firstprivate
;
3391 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c
))
3394 x
= build_outer_var_ref (var
, ctx
);
3397 case OMP_CLAUSE_FIRSTPRIVATE
:
3398 if (is_task_ctx (ctx
))
3400 if (is_reference (var
) || is_variable_sized (var
))
3402 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var
,
3404 || use_pointer_for_field (var
, NULL
))
3406 x
= build_receiver_ref (var
, false, ctx
);
3407 SET_DECL_VALUE_EXPR (new_var
, x
);
3408 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
3413 x
= build_outer_var_ref (var
, ctx
);
3416 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
3417 && gimple_omp_for_combined_into_p (ctx
->stmt
))
3419 tree t
= OMP_CLAUSE_LINEAR_STEP (c
);
3420 tree stept
= TREE_TYPE (t
);
3421 tree ct
= find_omp_clause (clauses
,
3422 OMP_CLAUSE__LOOPTEMP_
);
3424 tree l
= OMP_CLAUSE_DECL (ct
);
3425 tree n1
= fd
->loop
.n1
;
3426 tree step
= fd
->loop
.step
;
3427 tree itype
= TREE_TYPE (l
);
3428 if (POINTER_TYPE_P (itype
))
3429 itype
= signed_type_for (itype
);
3430 l
= fold_build2 (MINUS_EXPR
, itype
, l
, n1
);
3431 if (TYPE_UNSIGNED (itype
)
3432 && fd
->loop
.cond_code
== GT_EXPR
)
3433 l
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
3434 fold_build1 (NEGATE_EXPR
, itype
, l
),
3435 fold_build1 (NEGATE_EXPR
,
3438 l
= fold_build2 (TRUNC_DIV_EXPR
, itype
, l
, step
);
3439 t
= fold_build2 (MULT_EXPR
, stept
,
3440 fold_convert (stept
, l
), t
);
3442 if (OMP_CLAUSE_LINEAR_ARRAY (c
))
3444 x
= lang_hooks
.decls
.omp_clause_linear_ctor
3446 gimplify_and_add (x
, ilist
);
3450 if (POINTER_TYPE_P (TREE_TYPE (x
)))
3451 x
= fold_build2 (POINTER_PLUS_EXPR
,
3452 TREE_TYPE (x
), x
, t
);
3454 x
= fold_build2 (PLUS_EXPR
, TREE_TYPE (x
), x
, t
);
3457 if ((OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_LINEAR
3458 || TREE_ADDRESSABLE (new_var
))
3459 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
3460 idx
, lane
, ivar
, lvar
))
3462 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
)
3464 tree iv
= create_tmp_var (TREE_TYPE (new_var
), NULL
);
3465 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, iv
, x
);
3466 gimplify_and_add (x
, ilist
);
3467 gimple_stmt_iterator gsi
3468 = gsi_start_1 (gimple_omp_body_ptr (ctx
->stmt
));
3470 = gimple_build_assign (unshare_expr (lvar
), iv
);
3471 gsi_insert_before_without_update (&gsi
, g
,
3473 tree t
= OMP_CLAUSE_LINEAR_STEP (c
);
3474 enum tree_code code
= PLUS_EXPR
;
3475 if (POINTER_TYPE_P (TREE_TYPE (new_var
)))
3476 code
= POINTER_PLUS_EXPR
;
3477 g
= gimple_build_assign_with_ops (code
, iv
, iv
, t
);
3478 gsi_insert_before_without_update (&gsi
, g
,
3482 x
= lang_hooks
.decls
.omp_clause_copy_ctor
3483 (c
, unshare_expr (ivar
), x
);
3484 gimplify_and_add (x
, &llist
[0]);
3485 x
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
3488 gimple_seq tseq
= NULL
;
3491 gimplify_stmt (&dtor
, &tseq
);
3492 gimple_seq_add_seq (&llist
[1], tseq
);
3497 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, new_var
, x
);
3498 gimplify_and_add (x
, ilist
);
3501 case OMP_CLAUSE__LOOPTEMP_
:
3502 gcc_assert (is_parallel_ctx (ctx
));
3503 x
= build_outer_var_ref (var
, ctx
);
3504 x
= build2 (MODIFY_EXPR
, TREE_TYPE (new_var
), new_var
, x
);
3505 gimplify_and_add (x
, ilist
);
3508 case OMP_CLAUSE_COPYIN
:
3509 by_ref
= use_pointer_for_field (var
, NULL
);
3510 x
= build_receiver_ref (var
, by_ref
, ctx
);
3511 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, x
);
	      append_to_statement_list (x, &copyin_seq);
3513 copyin_by_ref
|= by_ref
;
3516 case OMP_CLAUSE_REDUCTION
:
3517 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3519 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
3521 x
= build_outer_var_ref (var
, ctx
);
3523 if (is_reference (var
)
3524 && !useless_type_conversion_p (TREE_TYPE (placeholder
),
3526 x
= build_fold_addr_expr_loc (clause_loc
, x
);
3527 SET_DECL_VALUE_EXPR (placeholder
, x
);
3528 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
3529 tree new_vard
= new_var
;
3530 if (is_reference (var
))
3532 gcc_assert (TREE_CODE (new_var
) == MEM_REF
);
3533 new_vard
= TREE_OPERAND (new_var
, 0);
3534 gcc_assert (DECL_P (new_vard
));
3537 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
3538 idx
, lane
, ivar
, lvar
))
3540 if (new_vard
== new_var
)
3542 gcc_assert (DECL_VALUE_EXPR (new_var
) == lvar
);
3543 SET_DECL_VALUE_EXPR (new_var
, ivar
);
3547 SET_DECL_VALUE_EXPR (new_vard
,
3548 build_fold_addr_expr (ivar
));
3549 DECL_HAS_VALUE_EXPR_P (new_vard
) = 1;
3551 x
= lang_hooks
.decls
.omp_clause_default_ctor
3552 (c
, unshare_expr (ivar
),
3553 build_outer_var_ref (var
, ctx
));
3555 gimplify_and_add (x
, &llist
[0]);
3556 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
))
3558 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
);
3559 lower_omp (&tseq
, ctx
);
3560 gimple_seq_add_seq (&llist
[0], tseq
);
3562 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
3563 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
);
3564 lower_omp (&tseq
, ctx
);
3565 gimple_seq_add_seq (&llist
[1], tseq
);
3566 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
3567 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
3568 if (new_vard
== new_var
)
3569 SET_DECL_VALUE_EXPR (new_var
, lvar
);
3571 SET_DECL_VALUE_EXPR (new_vard
,
3572 build_fold_addr_expr (lvar
));
3573 x
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
3578 gimplify_stmt (&dtor
, &tseq
);
3579 gimple_seq_add_seq (&llist
[1], tseq
);
3583 /* If this is a reference to constant size reduction var
3584 with placeholder, we haven't emitted the initializer
3585 for it because it is undesirable if SIMD arrays are used.
3586 But if they aren't used, we need to emit the deferred
3587 initialization now. */
3588 else if (is_reference (var
) && is_simd
)
3589 handle_simd_reference (clause_loc
, new_vard
, ilist
);
3590 x
= lang_hooks
.decls
.omp_clause_default_ctor
3591 (c
, unshare_expr (new_var
),
3592 build_outer_var_ref (var
, ctx
));
3594 gimplify_and_add (x
, ilist
);
3595 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
))
3597 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
);
3598 lower_omp (&tseq
, ctx
);
3599 gimple_seq_add_seq (ilist
, tseq
);
3601 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
3604 tseq
= OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
);
3605 lower_omp (&tseq
, ctx
);
3606 gimple_seq_add_seq (dlist
, tseq
);
3607 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
3609 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
3614 x
= omp_reduction_init (c
, TREE_TYPE (new_var
));
3615 gcc_assert (TREE_CODE (TREE_TYPE (new_var
)) != ARRAY_TYPE
);
3616 enum tree_code code
= OMP_CLAUSE_REDUCTION_CODE (c
);
3618 /* reduction(-:var) sums up the partial results, so it
3619 acts identically to reduction(+:var). */
3620 if (code
== MINUS_EXPR
)
3623 tree new_vard
= new_var
;
3624 if (is_simd
&& is_reference (var
))
3626 gcc_assert (TREE_CODE (new_var
) == MEM_REF
);
3627 new_vard
= TREE_OPERAND (new_var
, 0);
3628 gcc_assert (DECL_P (new_vard
));
3631 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
3632 idx
, lane
, ivar
, lvar
))
3634 tree ref
= build_outer_var_ref (var
, ctx
);
3636 gimplify_assign (unshare_expr (ivar
), x
, &llist
[0]);
3638 x
= build2 (code
, TREE_TYPE (ref
), ref
, ivar
);
3639 ref
= build_outer_var_ref (var
, ctx
);
3640 gimplify_assign (ref
, x
, &llist
[1]);
3642 if (new_vard
!= new_var
)
3644 SET_DECL_VALUE_EXPR (new_vard
,
3645 build_fold_addr_expr (lvar
));
3646 DECL_HAS_VALUE_EXPR_P (new_vard
) = 1;
3651 if (is_reference (var
) && is_simd
)
3652 handle_simd_reference (clause_loc
, new_vard
, ilist
);
3653 gimplify_assign (new_var
, x
, ilist
);
3656 tree ref
= build_outer_var_ref (var
, ctx
);
3658 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
3659 ref
= build_outer_var_ref (var
, ctx
);
3660 gimplify_assign (ref
, x
, dlist
);
3674 tree uid
= create_tmp_var (ptr_type_node
, "simduid");
3675 /* Don't want uninit warnings on simduid, it is always uninitialized,
3676 but we use it not for the value, but for the DECL_UID only. */
3677 TREE_NO_WARNING (uid
) = 1;
3679 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE
, 1, uid
);
3680 gimple_call_set_lhs (g
, lane
);
3681 gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx
->stmt
));
3682 gsi_insert_before_without_update (&gsi
, g
, GSI_SAME_STMT
);
3683 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE__SIMDUID_
);
3684 OMP_CLAUSE__SIMDUID__DECL (c
) = uid
;
3685 OMP_CLAUSE_CHAIN (c
) = gimple_omp_for_clauses (ctx
->stmt
);
3686 gimple_omp_for_set_clauses (ctx
->stmt
, c
);
3687 g
= gimple_build_assign_with_ops (INTEGER_CST
, lane
,
3688 build_int_cst (unsigned_type_node
, 0),
3690 gimple_seq_add_stmt (ilist
, g
);
3691 for (int i
= 0; i
< 2; i
++)
3694 tree vf
= create_tmp_var (unsigned_type_node
, NULL
);
3695 g
= gimple_build_call_internal (IFN_GOMP_SIMD_VF
, 1, uid
);
3696 gimple_call_set_lhs (g
, vf
);
3697 gimple_seq
*seq
= i
== 0 ? ilist
: dlist
;
3698 gimple_seq_add_stmt (seq
, g
);
3699 tree t
= build_int_cst (unsigned_type_node
, 0);
3700 g
= gimple_build_assign_with_ops (INTEGER_CST
, idx
, t
, NULL_TREE
);
3701 gimple_seq_add_stmt (seq
, g
);
3702 tree body
= create_artificial_label (UNKNOWN_LOCATION
);
3703 tree header
= create_artificial_label (UNKNOWN_LOCATION
);
3704 tree end
= create_artificial_label (UNKNOWN_LOCATION
);
3705 gimple_seq_add_stmt (seq
, gimple_build_goto (header
));
3706 gimple_seq_add_stmt (seq
, gimple_build_label (body
));
3707 gimple_seq_add_seq (seq
, llist
[i
]);
3708 t
= build_int_cst (unsigned_type_node
, 1);
3709 g
= gimple_build_assign_with_ops (PLUS_EXPR
, idx
, idx
, t
);
3710 gimple_seq_add_stmt (seq
, g
);
3711 gimple_seq_add_stmt (seq
, gimple_build_label (header
));
3712 g
= gimple_build_cond (LT_EXPR
, idx
, vf
, body
, end
);
3713 gimple_seq_add_stmt (seq
, g
);
3714 gimple_seq_add_stmt (seq
, gimple_build_label (end
));
  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
			   0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  And similarly
     for UDRs if initializer expression refers to omp_orig.  */
  if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
    {
      /* Don't add any barrier for #pragma omp simd or
	 #pragma omp distribute.  */
      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
	  || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
	gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
    }

  /* If max_vf is non-zero, then we can use only a vectorization factor
     up to the max_vf we chose.  So stick it into the safelen clause.  */
  if (max_vf)
    {
      tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				OMP_CLAUSE_SAFELEN);
      if (c == NULL_TREE
	  || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
	      && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
				   max_vf) == 1))
	{
	  c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
	  OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
						       max_vf);
	  OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
	  gimple_omp_for_set_clauses (ctx->stmt, c);
	}
    }
}
3767 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3768 both parallel and workshare constructs. PREDICATE may be NULL if it's
3772 lower_lastprivate_clauses (tree clauses
, tree predicate
, gimple_seq
*stmt_list
,
3775 tree x
, c
, label
= NULL
, orig_clauses
= clauses
;
3776 bool par_clauses
= false;
3777 tree simduid
= NULL
, lastlane
= NULL
;
3779 /* Early exit if there are no lastprivate or linear clauses. */
3780 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
3781 if (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_LASTPRIVATE
3782 || (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_LINEAR
3783 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses
)))
3785 if (clauses
== NULL
)
3787 /* If this was a workshare clause, see if it had been combined
3788 with its parallel. In that case, look for the clauses on the
3789 parallel statement itself. */
3790 if (is_parallel_ctx (ctx
))
3794 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
3797 clauses
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
3798 OMP_CLAUSE_LASTPRIVATE
);
3799 if (clauses
== NULL
)
3807 tree label_true
, arm1
, arm2
;
3809 label
= create_artificial_label (UNKNOWN_LOCATION
);
3810 label_true
= create_artificial_label (UNKNOWN_LOCATION
);
3811 arm1
= TREE_OPERAND (predicate
, 0);
3812 arm2
= TREE_OPERAND (predicate
, 1);
3813 gimplify_expr (&arm1
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
3814 gimplify_expr (&arm2
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
3815 stmt
= gimple_build_cond (TREE_CODE (predicate
), arm1
, arm2
,
3817 gimple_seq_add_stmt (stmt_list
, stmt
);
3818 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label_true
));
3821 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
3822 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
3824 simduid
= find_omp_clause (orig_clauses
, OMP_CLAUSE__SIMDUID_
);
3826 simduid
= OMP_CLAUSE__SIMDUID__DECL (simduid
);
3829 for (c
= clauses
; c
;)
3832 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
3834 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
3835 || (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
3836 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c
)))
3838 var
= OMP_CLAUSE_DECL (c
);
3839 new_var
= lookup_decl (var
, ctx
);
3841 if (simduid
&& DECL_HAS_VALUE_EXPR_P (new_var
))
3843 tree val
= DECL_VALUE_EXPR (new_var
);
3844 if (TREE_CODE (val
) == ARRAY_REF
3845 && VAR_P (TREE_OPERAND (val
, 0))
3846 && lookup_attribute ("omp simd array",
3847 DECL_ATTRIBUTES (TREE_OPERAND (val
,
3850 if (lastlane
== NULL
)
3852 lastlane
= create_tmp_var (unsigned_type_node
, NULL
);
3854 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE
,
3856 TREE_OPERAND (val
, 1));
3857 gimple_call_set_lhs (g
, lastlane
);
3858 gimple_seq_add_stmt (stmt_list
, g
);
3860 new_var
= build4 (ARRAY_REF
, TREE_TYPE (val
),
3861 TREE_OPERAND (val
, 0), lastlane
,
3862 NULL_TREE
, NULL_TREE
);
3866 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
3867 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
3869 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
3870 gimple_seq_add_seq (stmt_list
,
3871 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
));
3872 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
) = NULL
;
3874 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
3875 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
))
3877 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
), ctx
);
3878 gimple_seq_add_seq (stmt_list
,
3879 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
));
3880 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c
) = NULL
;
3883 x
= build_outer_var_ref (var
, ctx
);
3884 if (is_reference (var
))
3885 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
3886 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, x
, new_var
);
3887 gimplify_and_add (x
, stmt_list
);
3889 c
= OMP_CLAUSE_CHAIN (c
);
3890 if (c
== NULL
&& !par_clauses
)
3892 /* If this was a workshare clause, see if it had been combined
3893 with its parallel. In that case, continue looking for the
3894 clauses also on the parallel statement itself. */
3895 if (is_parallel_ctx (ctx
))
3899 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
3902 c
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
3903 OMP_CLAUSE_LASTPRIVATE
);
3909 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label
));
3913 /* Generate code to implement the REDUCTION clauses. */
3916 lower_reduction_clauses (tree clauses
, gimple_seq
*stmt_seqp
, omp_context
*ctx
)
3918 gimple_seq sub_seq
= NULL
;
3923 /* SIMD reductions are handled in lower_rec_input_clauses. */
3924 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
3925 && gimple_omp_for_kind (ctx
->stmt
) & GF_OMP_FOR_SIMD
)
3928 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3929 update in that case, otherwise use a lock. */
3930 for (c
= clauses
; c
&& count
< 2; c
= OMP_CLAUSE_CHAIN (c
))
3931 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
3933 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3935 /* Never use OMP_ATOMIC for array reductions or UDRs. */
3945 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
3947 tree var
, ref
, new_var
;
3948 enum tree_code code
;
3949 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
3951 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
3954 var
= OMP_CLAUSE_DECL (c
);
3955 new_var
= lookup_decl (var
, ctx
);
3956 if (is_reference (var
))
3957 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
3958 ref
= build_outer_var_ref (var
, ctx
);
3959 code
= OMP_CLAUSE_REDUCTION_CODE (c
);
3961 /* reduction(-:var) sums up the partial results, so it acts
3962 identically to reduction(+:var). */
3963 if (code
== MINUS_EXPR
)
3968 tree addr
= build_fold_addr_expr_loc (clause_loc
, ref
);
3970 addr
= save_expr (addr
);
3971 ref
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (addr
)), addr
);
3972 x
= fold_build2_loc (clause_loc
, code
, TREE_TYPE (ref
), ref
, new_var
);
3973 x
= build2 (OMP_ATOMIC
, void_type_node
, addr
, x
);
3974 gimplify_and_add (x
, stmt_seqp
);
3978 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3980 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
3982 if (is_reference (var
)
3983 && !useless_type_conversion_p (TREE_TYPE (placeholder
),
3985 ref
= build_fold_addr_expr_loc (clause_loc
, ref
);
3986 SET_DECL_VALUE_EXPR (placeholder
, ref
);
3987 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
3988 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
3989 gimple_seq_add_seq (&sub_seq
, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
));
3990 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
3991 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
) = NULL
;
3995 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
3996 ref
= build_outer_var_ref (var
, ctx
);
3997 gimplify_assign (ref
, x
, &sub_seq
);
4001 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
),
4003 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4005 gimple_seq_add_seq (stmt_seqp
, sub_seq
);
4007 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
),
4009 gimple_seq_add_stmt (stmt_seqp
, stmt
);
4013 /* Generate code to implement the COPYPRIVATE clauses. */
4016 lower_copyprivate_clauses (tree clauses
, gimple_seq
*slist
, gimple_seq
*rlist
,
4021 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
4023 tree var
, new_var
, ref
, x
;
4025 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
4027 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYPRIVATE
)
4030 var
= OMP_CLAUSE_DECL (c
);
4031 by_ref
= use_pointer_for_field (var
, NULL
);
4033 ref
= build_sender_ref (var
, ctx
);
4034 x
= new_var
= lookup_decl_in_outer_ctx (var
, ctx
);
4037 x
= build_fold_addr_expr_loc (clause_loc
, new_var
);
4038 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ref
), x
);
4040 gimplify_assign (ref
, x
, slist
);
4042 ref
= build_receiver_ref (var
, false, ctx
);
4045 ref
= fold_convert_loc (clause_loc
,
4046 build_pointer_type (TREE_TYPE (new_var
)),
4048 ref
= build_fold_indirect_ref_loc (clause_loc
, ref
);
4050 if (is_reference (var
))
4052 ref
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), ref
);
4053 ref
= build_simple_mem_ref_loc (clause_loc
, ref
);
4054 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
4056 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, ref
);
4057 gimplify_and_add (x
, rlist
);
4062 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4063 and REDUCTION from the sender (aka parent) side. */
4066 lower_send_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*olist
,
4071 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
4073 tree val
, ref
, x
, var
;
4074 bool by_ref
, do_in
= false, do_out
= false;
4075 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
4077 switch (OMP_CLAUSE_CODE (c
))
4079 case OMP_CLAUSE_PRIVATE
:
4080 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
4083 case OMP_CLAUSE_FIRSTPRIVATE
:
4084 case OMP_CLAUSE_COPYIN
:
4085 case OMP_CLAUSE_LASTPRIVATE
:
4086 case OMP_CLAUSE_REDUCTION
:
4087 case OMP_CLAUSE__LOOPTEMP_
:
4093 val
= OMP_CLAUSE_DECL (c
);
4094 var
= lookup_decl_in_outer_ctx (val
, ctx
);
4096 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYIN
4097 && is_global_var (var
))
4099 if (is_variable_sized (val
))
4101 by_ref
= use_pointer_for_field (val
, NULL
);
4103 switch (OMP_CLAUSE_CODE (c
))
4105 case OMP_CLAUSE_PRIVATE
:
4106 case OMP_CLAUSE_FIRSTPRIVATE
:
4107 case OMP_CLAUSE_COPYIN
:
4108 case OMP_CLAUSE__LOOPTEMP_
:
4112 case OMP_CLAUSE_LASTPRIVATE
:
4113 if (by_ref
|| is_reference (val
))
4115 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
4122 if (lang_hooks
.decls
.omp_private_outer_ref (val
))
4127 case OMP_CLAUSE_REDUCTION
:
4129 do_out
= !(by_ref
|| is_reference (val
));
4138 ref
= build_sender_ref (val
, ctx
);
4139 x
= by_ref
? build_fold_addr_expr_loc (clause_loc
, var
) : var
;
4140 gimplify_assign (ref
, x
, ilist
);
4141 if (is_task_ctx (ctx
))
4142 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref
, 1)) = NULL
;
4147 ref
= build_sender_ref (val
, ctx
);
4148 gimplify_assign (var
, ref
, olist
);
4153 /* Generate code to implement SHARED from the sender (aka parent)
4154 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4155 list things that got automatically shared. */
4158 lower_send_shared_vars (gimple_seq
*ilist
, gimple_seq
*olist
, omp_context
*ctx
)
4160 tree var
, ovar
, nvar
, f
, x
, record_type
;
4162 if (ctx
->record_type
== NULL
)
4165 record_type
= ctx
->srecord_type
? ctx
->srecord_type
: ctx
->record_type
;
4166 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
4168 ovar
= DECL_ABSTRACT_ORIGIN (f
);
4169 nvar
= maybe_lookup_decl (ovar
, ctx
);
4170 if (!nvar
|| !DECL_HAS_VALUE_EXPR_P (nvar
))
4173 /* If CTX is a nested parallel directive. Find the immediately
4174 enclosing parallel or workshare construct that contains a
4175 mapping for OVAR. */
4176 var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
4178 if (use_pointer_for_field (ovar
, ctx
))
4180 x
= build_sender_ref (ovar
, ctx
);
4181 var
= build_fold_addr_expr (var
);
4182 gimplify_assign (x
, var
, ilist
);
4186 x
= build_sender_ref (ovar
, ctx
);
4187 gimplify_assign (x
, var
, ilist
);
4189 if (!TREE_READONLY (var
)
4190 /* We don't need to receive a new reference to a result
4191 or parm decl. In fact we may not store to it as we will
4192 invalidate any pending RSO and generate wrong gimple
4194 && !((TREE_CODE (var
) == RESULT_DECL
4195 || TREE_CODE (var
) == PARM_DECL
)
4196 && DECL_BY_REFERENCE (var
)))
4198 x
= build_sender_ref (ovar
, ctx
);
4199 gimplify_assign (var
, x
, olist
);
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
4220 /* Build the function calls to GOMP_parallel_start etc to actually
4221 generate the parallel operation. REGION is the parallel region
4222 being expanded. BB is the block where to insert the code. WS_ARGS
4223 will be set if this is a call to a combined parallel+workshare
4224 construct, it contains the list of additional arguments needed by
4225 the workshare construct. */
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gimple entry_stmt, vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses, flags;
  gimple_stmt_iterator gsi;
  gimple stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel we will be emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
		       + (region->inner->sched_kind
			  == OMP_CLAUSE_SCHEDULE_RUNTIME
			  ? 3 : region->inner->sched_kind));
	  start_ix = (enum built_in_function)start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);
  flags = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
  if (c)
    flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var, NULL);
	      tmp_else = make_ssa_name (tmp_var, NULL);
	      tmp_join = make_ssa_name (tmp_var, NULL);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  stmt = gimple_build_assign (tmp_then, val);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (else_bb);
	  stmt = gimple_build_assign
	    (tmp_else, build_int_cst (unsigned_type_node, 1));
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  add_bb_to_loop (then_bb, cond_bb->loop_father);
	  add_bb_to_loop (else_bb, cond_bb->loop_father);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gimple phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 4 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);
  args->quick_push (flags);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */
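/* For reference (a summary of the computation below, not new logic):
   the FLAGS argument encodes the task clauses as bits -- 1 when the
   task is untied, 2 when a final clause evaluated to true, 4 when it
   is mergeable, and 8 when depend clauses are present.  */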
static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
  depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
  flags = build_int_cst (unsigned_type_node,
			 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));

  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
			   build_int_cst (unsigned_type_node, 2),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }
  if (depend)
    depend = OMP_CLAUSE_DECL (depend);
  else
    depend = build_int_cst (ptr_type_node, 0);

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
		       9, t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags,
		       depend);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */
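/* Sketch of the resulting shape (illustration only):

       try
         {
           BODY;
         }
       catch
         {
           <eh_must_not_throw with a terminate-like decl>
         }

   so an exception escaping BODY aborts instead of leaving the
   structured block.  */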
static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */
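/* E.g. the vector {d1, d2, d3} becomes the chain d1 -> d2 -> d3,
   linked through DECL_CHAIN; walking the vector backwards keeps the
   original order.  */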
static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
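/* Illustration only:

       #pragma omp parallel
         {
           #pragma omp for    -- implicit barrier at the end of the for
           for (i = 0; i < n; i++)
             f (i);
         }                    -- implicit barrier at the end of the parallel

   The workshare's barrier is immediately followed by the parallel's own
   implicit barrier, so (absent queued tasks) it can be dropped by
   turning its GIMPLE_OMP_RETURN into a nowait one.  */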
static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (!exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gimple parallel_stmt = last_stmt (region->entry);
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */
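/* Illustration only: inside a parallel body,

       tid = omp_get_thread_num ();

   is redirected to __builtin_omp_get_thread_num (), which is declared
   const, so repeated calls can be combined by later passes; the same
   is done for omp_get_num_threads ().  */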
static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
/* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs to be
   regimplified.  */

static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
/* Prepend TO = FROM assignment before *GSI_P.  */

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
{
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   true, GSI_SAME_STMT);
  gimple stmt = gimple_build_assign (to, from);
  gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}
/* Expand the OpenMP parallel or task directive starting at REGION.  */
static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (ssa_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_ssa_default_def (cfun, arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
/* Helper function for expand_omp_{for_*,simd}.  If this is the outermost
   of the combined collapse > 1 loop constructs, generate code like:
	if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
   Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
	count = 0;
   and set ZERO_ITER_BB to that bb.  If this isn't the outermost
   of the combined loop constructs, just initialize COUNTS array
   from the _looptemp_ clauses.  */

/* NOTE: It *could* be better to moosh all of the BBs together,
   creating one larger BB with all the computation and the unexpected
   jump at the end.  I.e.

   bool zero3, zero2, zero1, zero;

   zero3 = N32 c3 N31;
   count3 = (N32 - N31) /[cl] STEP3;
   zero2 = N22 c2 N21;
   count2 = (N22 - N21) /[cl] STEP2;
   zero1 = N12 c1 N11;
   count1 = (N12 - N11) /[cl] STEP1;
   zero = zero3 || zero2 || zero1;
   count = count1 * count2 * count3;
   if (__builtin_expect (zero, false)) goto zero_iter_bb;

   After all, we expect zero to be false, and thus we expect to have to
   evaluate all of the comparison expressions, so short-circuiting
   oughtn't be a win.  Since the condition isn't protecting a
   denominator, we're not concerned about divide-by-zero, so we can
   fully evaluate count even if a numerator turned out to be wrong.

   It seems like putting this all together would create much better
   scheduling opportunities, and less pressure on the chip's branch
   predictor.  */
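/* Worked example (illustration only, not used by the compiler): for

       #pragma omp for collapse(2)
       for (i = 0; i < 4; i++)
         for (j = 0; j < 5; j++)

   the scheme above computes count1 = 4 and count2 = 5 (each step is 1,
   so adj = 0), and count = count1 * count2 = 20; the collapsed construct
   then schedules a single flattened iteration space [0, 20).  */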
4998 expand_omp_for_init_counts (struct omp_for_data
*fd
, gimple_stmt_iterator
*gsi
,
4999 basic_block
&entry_bb
, tree
*counts
,
5000 basic_block
&zero_iter_bb
, int &first_zero_iter
,
5001 basic_block
&l2_dom_bb
)
5003 tree t
, type
= TREE_TYPE (fd
->loop
.v
);
5008 /* Collapsed loops need work for expansion into SSA form. */
5009 gcc_assert (!gimple_in_ssa_p (cfun
));
5011 if (gimple_omp_for_combined_into_p (fd
->for_stmt
)
5012 && TREE_CODE (fd
->loop
.n2
) != INTEGER_CST
)
5014 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5015 isn't supposed to be handled, as the inner loop doesn't
5017 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5018 OMP_CLAUSE__LOOPTEMP_
);
5019 gcc_assert (innerc
);
5020 for (i
= 0; i
< fd
->collapse
; i
++)
5022 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5023 OMP_CLAUSE__LOOPTEMP_
);
5024 gcc_assert (innerc
);
5026 counts
[i
] = OMP_CLAUSE_DECL (innerc
);
5028 counts
[0] = NULL_TREE
;
5033 for (i
= 0; i
< fd
->collapse
; i
++)
5035 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
5037 if (SSA_VAR_P (fd
->loop
.n2
)
5038 && ((t
= fold_binary (fd
->loops
[i
].cond_code
, boolean_type_node
,
5039 fold_convert (itype
, fd
->loops
[i
].n1
),
5040 fold_convert (itype
, fd
->loops
[i
].n2
)))
5041 == NULL_TREE
|| !integer_onep (t
)))
5044 n1
= fold_convert (itype
, unshare_expr (fd
->loops
[i
].n1
));
5045 n1
= force_gimple_operand_gsi (gsi
, n1
, true, NULL_TREE
,
5046 true, GSI_SAME_STMT
);
5047 n2
= fold_convert (itype
, unshare_expr (fd
->loops
[i
].n2
));
5048 n2
= force_gimple_operand_gsi (gsi
, n2
, true, NULL_TREE
,
5049 true, GSI_SAME_STMT
);
5050 stmt
= gimple_build_cond (fd
->loops
[i
].cond_code
, n1
, n2
,
5051 NULL_TREE
, NULL_TREE
);
5052 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
5053 if (walk_tree (gimple_cond_lhs_ptr (stmt
),
5054 expand_omp_regimplify_p
, NULL
, NULL
)
5055 || walk_tree (gimple_cond_rhs_ptr (stmt
),
5056 expand_omp_regimplify_p
, NULL
, NULL
))
5058 *gsi
= gsi_for_stmt (stmt
);
5059 gimple_regimplify_operands (stmt
, gsi
);
5061 e
= split_block (entry_bb
, stmt
);
5062 if (zero_iter_bb
== NULL
)
5064 first_zero_iter
= i
;
5065 zero_iter_bb
= create_empty_bb (entry_bb
);
5066 add_bb_to_loop (zero_iter_bb
, entry_bb
->loop_father
);
5067 *gsi
= gsi_after_labels (zero_iter_bb
);
5068 stmt
= gimple_build_assign (fd
->loop
.n2
,
5069 build_zero_cst (type
));
5070 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
5071 set_immediate_dominator (CDI_DOMINATORS
, zero_iter_bb
,
5074 ne
= make_edge (entry_bb
, zero_iter_bb
, EDGE_FALSE_VALUE
);
5075 ne
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
5076 e
->flags
= EDGE_TRUE_VALUE
;
5077 e
->probability
= REG_BR_PROB_BASE
- ne
->probability
;
5078 if (l2_dom_bb
== NULL
)
5079 l2_dom_bb
= entry_bb
;
5081 *gsi
= gsi_last_bb (entry_bb
);
5084 if (POINTER_TYPE_P (itype
))
5085 itype
= signed_type_for (itype
);
5086 t
= build_int_cst (itype
, (fd
->loops
[i
].cond_code
== LT_EXPR
5088 t
= fold_build2 (PLUS_EXPR
, itype
,
5089 fold_convert (itype
, fd
->loops
[i
].step
), t
);
5090 t
= fold_build2 (PLUS_EXPR
, itype
, t
,
5091 fold_convert (itype
, fd
->loops
[i
].n2
));
5092 t
= fold_build2 (MINUS_EXPR
, itype
, t
,
5093 fold_convert (itype
, fd
->loops
[i
].n1
));
5094 /* ?? We could probably use CEIL_DIV_EXPR instead of
5095 TRUNC_DIV_EXPR and adjusting by hand. Unless we can't
5096 generate the same code in the end because generically we
5097 don't know that the values involved must be negative for
5099 if (TYPE_UNSIGNED (itype
) && fd
->loops
[i
].cond_code
== GT_EXPR
)
5100 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
5101 fold_build1 (NEGATE_EXPR
, itype
, t
),
5102 fold_build1 (NEGATE_EXPR
, itype
,
5103 fold_convert (itype
,
5104 fd
->loops
[i
].step
)));
5106 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
,
5107 fold_convert (itype
, fd
->loops
[i
].step
));
5108 t
= fold_convert (type
, t
);
5109 if (TREE_CODE (t
) == INTEGER_CST
)
5113 counts
[i
] = create_tmp_reg (type
, ".count");
5114 expand_omp_build_assign (gsi
, counts
[i
], t
);
5116 if (SSA_VAR_P (fd
->loop
.n2
))
5121 t
= fold_build2 (MULT_EXPR
, type
, fd
->loop
.n2
, counts
[i
]);
5122 expand_omp_build_assign (gsi
, fd
->loop
.n2
, t
);
/* Helper function for expand_omp_{for_*,simd}.  Generate code like:
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
   if this loop doesn't have an inner loop construct combined with it.
   If it does have an inner loop construct combined with it and the
   iteration count isn't known constant, store values from counts array
   into its _looptemp_ temporaries instead.  */
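/* Illustration only: for two collapsed zero-based loops with counts
   4 and 5 and unit steps, the flattened index T = 13 is decomposed as

       V2 = 0 + (13 % 5) * 1 = 3
       T  = 13 / 5 = 2
       V1 = 0 + 2 * 1 = 2

   i.e. flattened iteration 13 corresponds to (V1, V2) = (2, 3).  */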
5141 expand_omp_for_init_vars (struct omp_for_data
*fd
, gimple_stmt_iterator
*gsi
,
5142 tree
*counts
, gimple inner_stmt
, tree startvar
)
5145 if (gimple_omp_for_combined_p (fd
->for_stmt
))
5147 /* If fd->loop.n2 is constant, then no propagation of the counts
5148 is needed, they are constant. */
5149 if (TREE_CODE (fd
->loop
.n2
) == INTEGER_CST
)
5152 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
5153 ? gimple_omp_parallel_clauses (inner_stmt
)
5154 : gimple_omp_for_clauses (inner_stmt
);
5155 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5156 isn't supposed to be handled, as the inner loop doesn't
5158 tree innerc
= find_omp_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
5159 gcc_assert (innerc
);
5160 for (i
= 0; i
< fd
->collapse
; i
++)
5162 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5163 OMP_CLAUSE__LOOPTEMP_
);
5164 gcc_assert (innerc
);
5167 tree tem
= OMP_CLAUSE_DECL (innerc
);
5168 tree t
= fold_convert (TREE_TYPE (tem
), counts
[i
]);
5169 t
= force_gimple_operand_gsi (gsi
, t
, false, NULL_TREE
,
5170 false, GSI_CONTINUE_LINKING
);
5171 gimple stmt
= gimple_build_assign (tem
, t
);
5172 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5178 tree type
= TREE_TYPE (fd
->loop
.v
);
5179 tree tem
= create_tmp_reg (type
, ".tem");
5180 gimple stmt
= gimple_build_assign (tem
, startvar
);
5181 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5183 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
5185 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
), itype
, t
;
5187 if (POINTER_TYPE_P (vtype
))
5188 itype
= signed_type_for (vtype
);
5190 t
= fold_build2 (TRUNC_MOD_EXPR
, type
, tem
, counts
[i
]);
5193 t
= fold_convert (itype
, t
);
5194 t
= fold_build2 (MULT_EXPR
, itype
, t
,
5195 fold_convert (itype
, fd
->loops
[i
].step
));
5196 if (POINTER_TYPE_P (vtype
))
5197 t
= fold_build_pointer_plus (fd
->loops
[i
].n1
, t
);
5199 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loops
[i
].n1
, t
);
5200 t
= force_gimple_operand_gsi (gsi
, t
,
5201 DECL_P (fd
->loops
[i
].v
)
5202 && TREE_ADDRESSABLE (fd
->loops
[i
].v
),
5204 GSI_CONTINUE_LINKING
);
5205 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
5206 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5209 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, tem
, counts
[i
]);
5210 t
= force_gimple_operand_gsi (gsi
, t
, false, NULL_TREE
,
5211 false, GSI_CONTINUE_LINKING
);
5212 stmt
= gimple_build_assign (tem
, t
);
5213 gsi_insert_after (gsi
, stmt
, GSI_CONTINUE_LINKING
);
5219 /* Helper function for expand_omp_for_*. Generate code like:
5222 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5226 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5233 extract_omp_for_update_vars (struct omp_for_data
*fd
, basic_block cont_bb
,
5234 basic_block body_bb
)
5236 basic_block last_bb
, bb
, collapse_bb
= NULL
;
5238 gimple_stmt_iterator gsi
;
5244 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
5246 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
);
5248 bb
= create_empty_bb (last_bb
);
5249 add_bb_to_loop (bb
, last_bb
->loop_father
);
5250 gsi
= gsi_start_bb (bb
);
5252 if (i
< fd
->collapse
- 1)
5254 e
= make_edge (last_bb
, bb
, EDGE_FALSE_VALUE
);
5255 e
->probability
= REG_BR_PROB_BASE
/ 8;
5257 t
= fd
->loops
[i
+ 1].n1
;
5258 t
= force_gimple_operand_gsi (&gsi
, t
,
5259 DECL_P (fd
->loops
[i
+ 1].v
)
5260 && TREE_ADDRESSABLE (fd
->loops
[i
5263 GSI_CONTINUE_LINKING
);
5264 stmt
= gimple_build_assign (fd
->loops
[i
+ 1].v
, t
);
5265 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5270 set_immediate_dominator (CDI_DOMINATORS
, bb
, last_bb
);
5272 if (POINTER_TYPE_P (vtype
))
5273 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, fd
->loops
[i
].step
);
5275 t
= fold_build2 (PLUS_EXPR
, vtype
, fd
->loops
[i
].v
, fd
->loops
[i
].step
);
5276 t
= force_gimple_operand_gsi (&gsi
, t
,
5277 DECL_P (fd
->loops
[i
].v
)
5278 && TREE_ADDRESSABLE (fd
->loops
[i
].v
),
5279 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
5280 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
5281 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5285 t
= fd
->loops
[i
].n2
;
5286 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5287 false, GSI_CONTINUE_LINKING
);
5288 tree v
= fd
->loops
[i
].v
;
5289 if (DECL_P (v
) && TREE_ADDRESSABLE (v
))
5290 v
= force_gimple_operand_gsi (&gsi
, v
, true, NULL_TREE
,
5291 false, GSI_CONTINUE_LINKING
);
5292 t
= fold_build2 (fd
->loops
[i
].cond_code
, boolean_type_node
, v
, t
);
5293 stmt
= gimple_build_cond_empty (t
);
5294 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5295 e
= make_edge (bb
, body_bb
, EDGE_TRUE_VALUE
);
5296 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
5299 make_edge (bb
, body_bb
, EDGE_FALLTHRU
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.
    If this is gimple_omp_for_combined_p loop, then instead of assigning
    V and iend in L0 we assign the first two _looptemp_ clause decls of the
    inner GIMPLE_OMP_FOR and V += STEP; and
    if (V cond iend) goto L1; else goto L2; are removed.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	goto Z1;
    Z0:
	count = 0;
    Z1:
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
	*/
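/* Illustration only: for

       #pragma omp for schedule(runtime)
       for (i = 0; i < n; i++)
         body (i);

   inside an existing (non-combined) parallel, the pseudocode above
   instantiates roughly as

       if (GOMP_loop_runtime_start (0, n, 1, &istart0, &iend0))
         do
           {
             for (i = istart0; i < iend0; i++)
               body (i);
           }
         while (GOMP_loop_runtime_next (&istart0, &iend0));
       GOMP_loop_end ();

   START_FN and NEXT_FN select the GOMP_loop_*_start/next variant that
   matches the schedule kind.  */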
5399 expand_omp_for_generic (struct omp_region
*region
,
5400 struct omp_for_data
*fd
,
5401 enum built_in_function start_fn
,
5402 enum built_in_function next_fn
,
5405 tree type
, istart0
, iend0
, iend
;
5406 tree t
, vmain
, vback
, bias
= NULL_TREE
;
5407 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, collapse_bb
;
5408 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
5409 gimple_stmt_iterator gsi
;
5411 bool in_combined_parallel
= is_combined_parallel (region
);
5412 bool broken_loop
= region
->cont
== NULL
;
5414 tree
*counts
= NULL
;
5417 gcc_assert (!broken_loop
|| !in_combined_parallel
);
5418 gcc_assert (fd
->iter_type
== long_integer_type_node
5419 || !in_combined_parallel
);
5421 type
= TREE_TYPE (fd
->loop
.v
);
5422 istart0
= create_tmp_var (fd
->iter_type
, ".istart0");
5423 iend0
= create_tmp_var (fd
->iter_type
, ".iend0");
5424 TREE_ADDRESSABLE (istart0
) = 1;
5425 TREE_ADDRESSABLE (iend0
) = 1;
5427 /* See if we need to bias by LLONG_MIN. */
5428 if (fd
->iter_type
== long_long_unsigned_type_node
5429 && TREE_CODE (type
) == INTEGER_TYPE
5430 && !TYPE_UNSIGNED (type
))
5434 if (fd
->loop
.cond_code
== LT_EXPR
)
5437 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
5441 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
5444 if (TREE_CODE (n1
) != INTEGER_CST
5445 || TREE_CODE (n2
) != INTEGER_CST
5446 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
5447 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
5450 entry_bb
= region
->entry
;
5451 cont_bb
= region
->cont
;
5453 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
5454 gcc_assert (broken_loop
5455 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
5456 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
5457 l1_bb
= single_succ (l0_bb
);
5460 l2_bb
= create_empty_bb (cont_bb
);
5461 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
);
5462 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
5466 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
5467 exit_bb
= region
->exit
;
5469 gsi
= gsi_last_bb (entry_bb
);
5471 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
5472 if (fd
->collapse
> 1)
5474 int first_zero_iter
= -1;
5475 basic_block zero_iter_bb
= NULL
, l2_dom_bb
= NULL
;
5477 counts
= XALLOCAVEC (tree
, fd
->collapse
);
5478 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
5479 zero_iter_bb
, first_zero_iter
,
5484 /* Some counts[i] vars might be uninitialized if
5485 some loop has zero iterations. But the body shouldn't
5486 be executed in that case, so just avoid uninit warnings. */
5487 for (i
= first_zero_iter
; i
< fd
->collapse
; i
++)
5488 if (SSA_VAR_P (counts
[i
]))
5489 TREE_NO_WARNING (counts
[i
]) = 1;
5491 e
= split_block (entry_bb
, gsi_stmt (gsi
));
5493 make_edge (zero_iter_bb
, entry_bb
, EDGE_FALLTHRU
);
5494 gsi
= gsi_last_bb (entry_bb
);
5495 set_immediate_dominator (CDI_DOMINATORS
, entry_bb
,
5496 get_immediate_dominator (CDI_DOMINATORS
,
5500 if (in_combined_parallel
)
5502 /* In a combined parallel loop, emit a call to
5503 GOMP_loop_foo_next. */
5504 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
5505 build_fold_addr_expr (istart0
),
5506 build_fold_addr_expr (iend0
));
5510 tree t0
, t1
, t2
, t3
, t4
;
5511 /* If this is not a combined parallel loop, emit a call to
5512 GOMP_loop_foo_start in ENTRY_BB. */
5513 t4
= build_fold_addr_expr (iend0
);
5514 t3
= build_fold_addr_expr (istart0
);
5515 t2
= fold_convert (fd
->iter_type
, fd
->loop
.step
);
5518 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
5520 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5521 OMP_CLAUSE__LOOPTEMP_
);
5522 gcc_assert (innerc
);
5523 t0
= OMP_CLAUSE_DECL (innerc
);
5524 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5525 OMP_CLAUSE__LOOPTEMP_
);
5526 gcc_assert (innerc
);
5527 t1
= OMP_CLAUSE_DECL (innerc
);
5529 if (POINTER_TYPE_P (TREE_TYPE (t0
))
5530 && TYPE_PRECISION (TREE_TYPE (t0
))
5531 != TYPE_PRECISION (fd
->iter_type
))
5533 /* Avoid casting pointers to integer of a different size. */
5534 tree itype
= signed_type_for (type
);
5535 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, t1
));
5536 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, t0
));
5540 t1
= fold_convert (fd
->iter_type
, t1
);
5541 t0
= fold_convert (fd
->iter_type
, t0
);
5545 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
5546 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
5548 if (fd
->iter_type
== long_integer_type_node
)
5552 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
5553 t
= build_call_expr (builtin_decl_explicit (start_fn
),
5554 6, t0
, t1
, t2
, t
, t3
, t4
);
5557 t
= build_call_expr (builtin_decl_explicit (start_fn
),
5558 5, t0
, t1
, t2
, t3
, t4
);
5566 /* The GOMP_loop_ull_*start functions have additional boolean
5567 argument, true for < loops and false for > loops.
5568 In Fortran, the C bool type can be different from
5569 boolean_type_node. */
5570 bfn_decl
= builtin_decl_explicit (start_fn
);
5571 c_bool_type
= TREE_TYPE (TREE_TYPE (bfn_decl
));
5572 t5
= build_int_cst (c_bool_type
,
5573 fd
->loop
.cond_code
== LT_EXPR
? 1 : 0);
5576 tree bfn_decl
= builtin_decl_explicit (start_fn
);
5577 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
5578 t
= build_call_expr (bfn_decl
, 7, t5
, t0
, t1
, t2
, t
, t3
, t4
);
5581 t
= build_call_expr (builtin_decl_explicit (start_fn
),
5582 6, t5
, t0
, t1
, t2
, t3
, t4
);
5585 if (TREE_TYPE (t
) != boolean_type_node
)
5586 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
5587 t
, build_int_cst (TREE_TYPE (t
), 0));
5588 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5589 true, GSI_SAME_STMT
);
5590 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
5592 /* Remove the GIMPLE_OMP_FOR statement. */
5593 gsi_remove (&gsi
, true);
5595 /* Iteration setup for sequential loop goes in L0_BB. */
5596 tree startvar
= fd
->loop
.v
;
5597 tree endvar
= NULL_TREE
;
5599 if (gimple_omp_for_combined_p (fd
->for_stmt
))
5601 gcc_assert (gimple_code (inner_stmt
) == GIMPLE_OMP_FOR
5602 && gimple_omp_for_kind (inner_stmt
)
5603 == GF_OMP_FOR_KIND_SIMD
);
5604 tree innerc
= find_omp_clause (gimple_omp_for_clauses (inner_stmt
),
5605 OMP_CLAUSE__LOOPTEMP_
);
5606 gcc_assert (innerc
);
5607 startvar
= OMP_CLAUSE_DECL (innerc
);
5608 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5609 OMP_CLAUSE__LOOPTEMP_
);
5610 gcc_assert (innerc
);
5611 endvar
= OMP_CLAUSE_DECL (innerc
);
5614 gsi
= gsi_start_bb (l0_bb
);
5617 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
5618 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
5619 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
5620 t
= fold_convert (TREE_TYPE (startvar
), t
);
5621 t
= force_gimple_operand_gsi (&gsi
, t
,
5623 && TREE_ADDRESSABLE (startvar
),
5624 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
5625 stmt
= gimple_build_assign (startvar
, t
);
5626 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5630 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
5631 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
5632 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
5633 t
= fold_convert (TREE_TYPE (startvar
), t
);
5634 iend
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5635 false, GSI_CONTINUE_LINKING
);
5638 stmt
= gimple_build_assign (endvar
, iend
);
5639 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5640 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (iend
)))
5641 stmt
= gimple_build_assign (fd
->loop
.v
, iend
);
5643 stmt
= gimple_build_assign_with_ops (NOP_EXPR
, fd
->loop
.v
, iend
,
5645 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5647 if (fd
->collapse
> 1)
5648 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
5652 /* Code to control the increment and predicate for the sequential
5653 loop goes in the CONT_BB. */
5654 gsi
= gsi_last_bb (cont_bb
);
5655 stmt
= gsi_stmt (gsi
);
5656 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
5657 vmain
= gimple_omp_continue_control_use (stmt
);
5658 vback
= gimple_omp_continue_control_def (stmt
);
5660 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
5662 if (POINTER_TYPE_P (type
))
5663 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
5665 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
5666 t
= force_gimple_operand_gsi (&gsi
, t
,
5668 && TREE_ADDRESSABLE (vback
),
5669 NULL_TREE
, true, GSI_SAME_STMT
);
5670 stmt
= gimple_build_assign (vback
, t
);
5671 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
5673 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
5674 DECL_P (vback
) && TREE_ADDRESSABLE (vback
) ? t
: vback
,
5676 stmt
= gimple_build_cond_empty (t
);
5677 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
5680 /* Remove GIMPLE_OMP_CONTINUE. */
5681 gsi_remove (&gsi
, true);
5683 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
5684 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, l1_bb
);
5686 /* Emit code to get the next parallel iteration in L2_BB. */
5687 gsi
= gsi_start_bb (l2_bb
);
5689 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
5690 build_fold_addr_expr (istart0
),
5691 build_fold_addr_expr (iend0
));
5692 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5693 false, GSI_CONTINUE_LINKING
);
5694 if (TREE_TYPE (t
) != boolean_type_node
)
5695 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
5696 t
, build_int_cst (TREE_TYPE (t
), 0));
5697 stmt
= gimple_build_cond_empty (t
);
5698 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5701 /* Add the loop cleanup function. */
5702 gsi
= gsi_last_bb (exit_bb
);
5703 if (gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
5704 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT
);
5705 else if (gimple_omp_return_lhs (gsi_stmt (gsi
)))
5706 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL
);
5708 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END
);
5709 stmt
= gimple_build_call (t
, 0);
5710 if (gimple_omp_return_lhs (gsi_stmt (gsi
)))
5711 gimple_call_set_lhs (stmt
, gimple_omp_return_lhs (gsi_stmt (gsi
)));
5712 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
5713 gsi_remove (&gsi
, true);
5715 /* Connect the new blocks. */
5716 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
5717 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
5723 e
= find_edge (cont_bb
, l3_bb
);
5724 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
5726 phis
= phi_nodes (l3_bb
);
5727 for (gsi
= gsi_start (phis
); !gsi_end_p (gsi
); gsi_next (&gsi
))
5729 gimple phi
= gsi_stmt (gsi
);
5730 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
5731 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
5735 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
5736 add_bb_to_loop (l2_bb
, cont_bb
->loop_father
);
5737 e
= find_edge (cont_bb
, l1_bb
);
5738 if (gimple_omp_for_combined_p (fd
->for_stmt
))
5743 else if (fd
->collapse
> 1)
5746 e
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
5749 e
->flags
= EDGE_TRUE_VALUE
;
5752 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
5753 find_edge (cont_bb
, l2_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
5757 e
= find_edge (cont_bb
, l2_bb
);
5758 e
->flags
= EDGE_FALLTHRU
;
5760 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
5762 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
5763 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
5764 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
5765 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
5766 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
5767 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
5768 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
5769 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
5771 struct loop
*outer_loop
= alloc_loop ();
5772 outer_loop
->header
= l0_bb
;
5773 outer_loop
->latch
= l2_bb
;
5774 add_loop (outer_loop
, l0_bb
->loop_father
);
5776 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
5778 struct loop
*loop
= alloc_loop ();
5779 loop
->header
= l1_bb
;
5780 /* The loop may have multiple latches. */
5781 add_loop (loop
, outer_loop
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
	*/
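/* Worked example (illustration only): n = 10 iterations over
   nthreads = 4 threads gives q = 2 and tt = 2, so threads 0 and 1
   each take q + 1 = 3 iterations ([0,3) and [3,6)) while threads 2
   and 3 take 2 each ([6,8) and [8,10)); every iteration is assigned
   exactly once and no thread gets more than one extra iteration.  */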
5825 expand_omp_for_static_nochunk (struct omp_region
*region
,
5826 struct omp_for_data
*fd
,
5829 tree n
, q
, s0
, e0
, e
, t
, tt
, nthreads
, threadid
;
5830 tree type
, itype
, vmain
, vback
;
5831 basic_block entry_bb
, second_bb
, third_bb
, exit_bb
, seq_start_bb
;
5832 basic_block body_bb
, cont_bb
, collapse_bb
= NULL
;
5834 gimple_stmt_iterator gsi
;
5837 enum built_in_function get_num_threads
= BUILT_IN_OMP_GET_NUM_THREADS
;
5838 enum built_in_function get_thread_num
= BUILT_IN_OMP_GET_THREAD_NUM
;
5839 bool broken_loop
= region
->cont
== NULL
;
5840 tree
*counts
= NULL
;
5843 itype
= type
= TREE_TYPE (fd
->loop
.v
);
5844 if (POINTER_TYPE_P (type
))
5845 itype
= signed_type_for (type
);
5847 entry_bb
= region
->entry
;
5848 cont_bb
= region
->cont
;
5849 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
5850 fin_bb
= BRANCH_EDGE (entry_bb
)->dest
;
5851 gcc_assert (broken_loop
5852 || (fin_bb
== FALLTHRU_EDGE (cont_bb
)->dest
));
5853 seq_start_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
5854 body_bb
= single_succ (seq_start_bb
);
5857 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
5858 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
5860 exit_bb
= region
->exit
;
5862 /* Iteration space partitioning goes in ENTRY_BB. */
5863 gsi
= gsi_last_bb (entry_bb
);
5864 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
5866 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_DISTRIBUTE
)
5868 get_num_threads
= BUILT_IN_OMP_GET_NUM_TEAMS
;
5869 get_thread_num
= BUILT_IN_OMP_GET_TEAM_NUM
;
5872 if (fd
->collapse
> 1)
5874 int first_zero_iter
= -1;
5875 basic_block l2_dom_bb
= NULL
;
5877 counts
= XALLOCAVEC (tree
, fd
->collapse
);
5878 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
5879 fin_bb
, first_zero_iter
,
5883 else if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
5884 t
= integer_one_node
;
5886 t
= fold_binary (fd
->loop
.cond_code
, boolean_type_node
,
5887 fold_convert (type
, fd
->loop
.n1
),
5888 fold_convert (type
, fd
->loop
.n2
));
5889 if (fd
->collapse
== 1
5890 && TYPE_UNSIGNED (type
)
5891 && (t
== NULL_TREE
|| !integer_onep (t
)))
5893 n1
= fold_convert (type
, unshare_expr (fd
->loop
.n1
));
5894 n1
= force_gimple_operand_gsi (&gsi
, n1
, true, NULL_TREE
,
5895 true, GSI_SAME_STMT
);
5896 n2
= fold_convert (type
, unshare_expr (fd
->loop
.n2
));
5897 n2
= force_gimple_operand_gsi (&gsi
, n2
, true, NULL_TREE
,
5898 true, GSI_SAME_STMT
);
5899 stmt
= gimple_build_cond (fd
->loop
.cond_code
, n1
, n2
,
5900 NULL_TREE
, NULL_TREE
);
5901 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
5902 if (walk_tree (gimple_cond_lhs_ptr (stmt
),
5903 expand_omp_regimplify_p
, NULL
, NULL
)
5904 || walk_tree (gimple_cond_rhs_ptr (stmt
),
5905 expand_omp_regimplify_p
, NULL
, NULL
))
5907 gsi
= gsi_for_stmt (stmt
);
5908 gimple_regimplify_operands (stmt
, &gsi
);
5910 ep
= split_block (entry_bb
, stmt
);
5911 ep
->flags
= EDGE_TRUE_VALUE
;
5912 entry_bb
= ep
->dest
;
5913 ep
->probability
= REG_BR_PROB_BASE
- (REG_BR_PROB_BASE
/ 2000 - 1);
5914 ep
= make_edge (ep
->src
, fin_bb
, EDGE_FALSE_VALUE
);
5915 ep
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
5916 if (gimple_in_ssa_p (cfun
))
5918 int dest_idx
= find_edge (entry_bb
, fin_bb
)->dest_idx
;
5919 for (gsi
= gsi_start_phis (fin_bb
);
5920 !gsi_end_p (gsi
); gsi_next (&gsi
))
5922 gimple phi
= gsi_stmt (gsi
);
5923 add_phi_arg (phi
, gimple_phi_arg_def (phi
, dest_idx
),
5924 ep
, UNKNOWN_LOCATION
);
5927 gsi
= gsi_last_bb (entry_bb
);
5930 t
= build_call_expr (builtin_decl_explicit (get_num_threads
), 0);
5931 t
= fold_convert (itype
, t
);
5932 nthreads
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5933 true, GSI_SAME_STMT
);
5935 t
= build_call_expr (builtin_decl_explicit (get_thread_num
), 0);
5936 t
= fold_convert (itype
, t
);
5937 threadid
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5938 true, GSI_SAME_STMT
);
5942 step
= fd
->loop
.step
;
5943 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
5945 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5946 OMP_CLAUSE__LOOPTEMP_
);
5947 gcc_assert (innerc
);
5948 n1
= OMP_CLAUSE_DECL (innerc
);
5949 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
5950 OMP_CLAUSE__LOOPTEMP_
);
5951 gcc_assert (innerc
);
5952 n2
= OMP_CLAUSE_DECL (innerc
);
5954 n1
= force_gimple_operand_gsi (&gsi
, fold_convert (type
, n1
),
5955 true, NULL_TREE
, true, GSI_SAME_STMT
);
5956 n2
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, n2
),
5957 true, NULL_TREE
, true, GSI_SAME_STMT
);
5958 step
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, step
),
5959 true, NULL_TREE
, true, GSI_SAME_STMT
);
5961 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
5962 t
= fold_build2 (PLUS_EXPR
, itype
, step
, t
);
5963 t
= fold_build2 (PLUS_EXPR
, itype
, t
, n2
);
5964 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
5965 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
5966 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
5967 fold_build1 (NEGATE_EXPR
, itype
, t
),
5968 fold_build1 (NEGATE_EXPR
, itype
, step
));
5970 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
5971 t
= fold_convert (itype
, t
);
5972 n
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5974 q
= create_tmp_reg (itype
, "q");
5975 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, n
, nthreads
);
5976 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
5977 gsi_insert_before (&gsi
, gimple_build_assign (q
, t
), GSI_SAME_STMT
);
5979 tt
= create_tmp_reg (itype
, "tt");
5980 t
= fold_build2 (TRUNC_MOD_EXPR
, itype
, n
, nthreads
);
5981 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
5982 gsi_insert_before (&gsi
, gimple_build_assign (tt
, t
), GSI_SAME_STMT
);
5984 t
= build2 (LT_EXPR
, boolean_type_node
, threadid
, tt
);
5985 stmt
= gimple_build_cond_empty (t
);
5986 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
5988 second_bb
= split_block (entry_bb
, stmt
)->dest
;
5989 gsi
= gsi_last_bb (second_bb
);
5990 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
5992 gsi_insert_before (&gsi
, gimple_build_assign (tt
, build_int_cst (itype
, 0)),
5994 stmt
= gimple_build_assign_with_ops (PLUS_EXPR
, q
, q
,
5995 build_int_cst (itype
, 1));
5996 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
5998 third_bb
= split_block (second_bb
, stmt
)->dest
;
5999 gsi
= gsi_last_bb (third_bb
);
6000 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6002 t
= build2 (MULT_EXPR
, itype
, q
, threadid
);
6003 t
= build2 (PLUS_EXPR
, itype
, t
, tt
);
6004 s0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6006 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, q
);
6007 e0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6009 t
= build2 (GE_EXPR
, boolean_type_node
, s0
, e0
);
6010 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
6012 /* Remove the GIMPLE_OMP_FOR statement. */
6013 gsi_remove (&gsi
, true);
6015 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6016 gsi
= gsi_start_bb (seq_start_bb
);
6018 tree startvar
= fd
->loop
.v
;
6019 tree endvar
= NULL_TREE
;
6021 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6023 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
6024 ? gimple_omp_parallel_clauses (inner_stmt
)
6025 : gimple_omp_for_clauses (inner_stmt
);
6026 tree innerc
= find_omp_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
6027 gcc_assert (innerc
);
6028 startvar
= OMP_CLAUSE_DECL (innerc
);
6029 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6030 OMP_CLAUSE__LOOPTEMP_
);
6031 gcc_assert (innerc
);
6032 endvar
= OMP_CLAUSE_DECL (innerc
);
6034 t
= fold_convert (itype
, s0
);
6035 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6036 if (POINTER_TYPE_P (type
))
6037 t
= fold_build_pointer_plus (n1
, t
);
6039 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6040 t
= fold_convert (TREE_TYPE (startvar
), t
);
6041 t
= force_gimple_operand_gsi (&gsi
, t
,
6043 && TREE_ADDRESSABLE (startvar
),
6044 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
6045 stmt
= gimple_build_assign (startvar
, t
);
6046 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6048 t
= fold_convert (itype
, e0
);
6049 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6050 if (POINTER_TYPE_P (type
))
6051 t
= fold_build_pointer_plus (n1
, t
);
6053 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6054 t
= fold_convert (TREE_TYPE (startvar
), t
);
6055 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6056 false, GSI_CONTINUE_LINKING
);
6059 stmt
= gimple_build_assign (endvar
, e
);
6060 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6061 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (e
)))
6062 stmt
= gimple_build_assign (fd
->loop
.v
, e
);
6064 stmt
= gimple_build_assign_with_ops (NOP_EXPR
, fd
->loop
.v
, e
,
6066 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6068 if (fd
->collapse
> 1)
6069 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
6073 /* The code controlling the sequential loop replaces the
6074 GIMPLE_OMP_CONTINUE. */
6075 gsi
= gsi_last_bb (cont_bb
);
6076 stmt
= gsi_stmt (gsi
);
6077 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
6078 vmain
= gimple_omp_continue_control_use (stmt
);
6079 vback
= gimple_omp_continue_control_def (stmt
);
6081 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
6083 if (POINTER_TYPE_P (type
))
6084 t
= fold_build_pointer_plus (vmain
, step
);
6086 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, step
);
6087 t
= force_gimple_operand_gsi (&gsi
, t
,
6089 && TREE_ADDRESSABLE (vback
),
6090 NULL_TREE
, true, GSI_SAME_STMT
);
6091 stmt
= gimple_build_assign (vback
, t
);
6092 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
6094 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
6095 DECL_P (vback
) && TREE_ADDRESSABLE (vback
)
6097 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
6100 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6101 gsi_remove (&gsi
, true);
6103 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
6104 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, body_bb
);
6107 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6108 gsi
= gsi_last_bb (exit_bb
);
6109 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
6111 t
= gimple_omp_return_lhs (gsi_stmt (gsi
));
6112 gsi_insert_after (&gsi
, build_omp_barrier (t
), GSI_SAME_STMT
);
6114 gsi_remove (&gsi
, true);
6116 /* Connect all the blocks. */
6117 ep
= make_edge (entry_bb
, third_bb
, EDGE_FALSE_VALUE
);
6118 ep
->probability
= REG_BR_PROB_BASE
/ 4 * 3;
6119 ep
= find_edge (entry_bb
, second_bb
);
6120 ep
->flags
= EDGE_TRUE_VALUE
;
6121 ep
->probability
= REG_BR_PROB_BASE
/ 4;
6122 find_edge (third_bb
, seq_start_bb
)->flags
= EDGE_FALSE_VALUE
;
6123 find_edge (third_bb
, fin_bb
)->flags
= EDGE_TRUE_VALUE
;
6127 ep
= find_edge (cont_bb
, body_bb
);
6128 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6133 else if (fd
->collapse
> 1)
6136 ep
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
6139 ep
->flags
= EDGE_TRUE_VALUE
;
6140 find_edge (cont_bb
, fin_bb
)->flags
6141 = ep
? EDGE_FALSE_VALUE
: EDGE_FALLTHRU
;
6144 set_immediate_dominator (CDI_DOMINATORS
, second_bb
, entry_bb
);
6145 set_immediate_dominator (CDI_DOMINATORS
, third_bb
, entry_bb
);
6146 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
, third_bb
);
6148 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
6149 recompute_dominator (CDI_DOMINATORS
, body_bb
));
6150 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
6151 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
6153 if (!broken_loop
&& !gimple_omp_for_combined_p (fd
->for_stmt
))
6155 struct loop
*loop
= alloc_loop ();
6156 loop
->header
= body_bb
;
6157 if (collapse_bb
== NULL
)
6158 loop
->latch
= cont_bb
;
6159 add_loop (loop
, body_bb
->loop_father
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min (s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
	...
	if (V cond e) goto L2; else goto L3;  */
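/* For example (illustration only, placeholder names): a worksharing loop
   such as

	#pragma omp for schedule(static, 4)
	for (i = 0; i < n; i++)
	  body (i);

   is what this routine expands; CHUNK above corresponds to the literal 4,
   and nthreads/threadid come from the libgomp runtime through the
   omp_get_num_threads ()/omp_get_thread_num () style builtins used below.
   body () and n are placeholders for user code.  */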
6203 expand_omp_for_static_chunk (struct omp_region
*region
,
6204 struct omp_for_data
*fd
, gimple inner_stmt
)
6206 tree n
, s0
, e0
, e
, t
;
6207 tree trip_var
, trip_init
, trip_main
, trip_back
, nthreads
, threadid
;
6208 tree type
, itype
, vmain
, vback
, vextra
;
6209 basic_block entry_bb
, exit_bb
, body_bb
, seq_start_bb
, iter_part_bb
;
6210 basic_block trip_update_bb
= NULL
, cont_bb
, collapse_bb
= NULL
, fin_bb
;
6211 gimple_stmt_iterator gsi
;
6214 enum built_in_function get_num_threads
= BUILT_IN_OMP_GET_NUM_THREADS
;
6215 enum built_in_function get_thread_num
= BUILT_IN_OMP_GET_THREAD_NUM
;
6216 bool broken_loop
= region
->cont
== NULL
;
6217 tree
*counts
= NULL
;
6220 itype
= type
= TREE_TYPE (fd
->loop
.v
);
6221 if (POINTER_TYPE_P (type
))
6222 itype
= signed_type_for (type
);
6224 entry_bb
= region
->entry
;
6225 se
= split_block (entry_bb
, last_stmt (entry_bb
));
6227 iter_part_bb
= se
->dest
;
6228 cont_bb
= region
->cont
;
6229 gcc_assert (EDGE_COUNT (iter_part_bb
->succs
) == 2);
6230 fin_bb
= BRANCH_EDGE (iter_part_bb
)->dest
;
6231 gcc_assert (broken_loop
6232 || fin_bb
== FALLTHRU_EDGE (cont_bb
)->dest
);
6233 seq_start_bb
= split_edge (FALLTHRU_EDGE (iter_part_bb
));
6234 body_bb
= single_succ (seq_start_bb
);
6237 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
6238 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
6239 trip_update_bb
= split_edge (FALLTHRU_EDGE (cont_bb
));
6241 exit_bb
= region
->exit
;
6243 /* Trip and adjustment setup goes in ENTRY_BB. */
6244 gsi
= gsi_last_bb (entry_bb
);
6245 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6247 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_DISTRIBUTE
)
6249 get_num_threads
= BUILT_IN_OMP_GET_NUM_TEAMS
;
6250 get_thread_num
= BUILT_IN_OMP_GET_TEAM_NUM
;
6253 if (fd
->collapse
> 1)
6255 int first_zero_iter
= -1;
6256 basic_block l2_dom_bb
= NULL
;
6258 counts
= XALLOCAVEC (tree
, fd
->collapse
);
6259 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
6260 fin_bb
, first_zero_iter
,
6264 else if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
6265 t
= integer_one_node
;
6267 t
= fold_binary (fd
->loop
.cond_code
, boolean_type_node
,
6268 fold_convert (type
, fd
->loop
.n1
),
6269 fold_convert (type
, fd
->loop
.n2
));
6270 if (fd
->collapse
== 1
6271 && TYPE_UNSIGNED (type
)
6272 && (t
== NULL_TREE
|| !integer_onep (t
)))
6274 n1
= fold_convert (type
, unshare_expr (fd
->loop
.n1
));
6275 n1
= force_gimple_operand_gsi (&gsi
, n1
, true, NULL_TREE
,
6276 true, GSI_SAME_STMT
);
6277 n2
= fold_convert (type
, unshare_expr (fd
->loop
.n2
));
6278 n2
= force_gimple_operand_gsi (&gsi
, n2
, true, NULL_TREE
,
6279 true, GSI_SAME_STMT
);
6280 stmt
= gimple_build_cond (fd
->loop
.cond_code
, n1
, n2
,
6281 NULL_TREE
, NULL_TREE
);
6282 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
6283 if (walk_tree (gimple_cond_lhs_ptr (stmt
),
6284 expand_omp_regimplify_p
, NULL
, NULL
)
6285 || walk_tree (gimple_cond_rhs_ptr (stmt
),
6286 expand_omp_regimplify_p
, NULL
, NULL
))
6288 gsi
= gsi_for_stmt (stmt
);
6289 gimple_regimplify_operands (stmt
, &gsi
);
6291 se
= split_block (entry_bb
, stmt
);
6292 se
->flags
= EDGE_TRUE_VALUE
;
6293 entry_bb
= se
->dest
;
6294 se
->probability
= REG_BR_PROB_BASE
- (REG_BR_PROB_BASE
/ 2000 - 1);
6295 se
= make_edge (se
->src
, fin_bb
, EDGE_FALSE_VALUE
);
6296 se
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
6297 if (gimple_in_ssa_p (cfun
))
6299 int dest_idx
= find_edge (entry_bb
, fin_bb
)->dest_idx
;
6300 for (gsi
= gsi_start_phis (fin_bb
);
6301 !gsi_end_p (gsi
); gsi_next (&gsi
))
6303 gimple phi
= gsi_stmt (gsi
);
6304 add_phi_arg (phi
, gimple_phi_arg_def (phi
, dest_idx
),
6305 se
, UNKNOWN_LOCATION
);
6308 gsi
= gsi_last_bb (entry_bb
);
6311 t
= build_call_expr (builtin_decl_explicit (get_num_threads
), 0);
6312 t
= fold_convert (itype
, t
);
6313 nthreads
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6314 true, GSI_SAME_STMT
);
6316 t
= build_call_expr (builtin_decl_explicit (get_thread_num
), 0);
6317 t
= fold_convert (itype
, t
);
6318 threadid
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6319 true, GSI_SAME_STMT
);
6323 step
= fd
->loop
.step
;
6324 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
6326 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
6327 OMP_CLAUSE__LOOPTEMP_
);
6328 gcc_assert (innerc
);
6329 n1
= OMP_CLAUSE_DECL (innerc
);
6330 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6331 OMP_CLAUSE__LOOPTEMP_
);
6332 gcc_assert (innerc
);
6333 n2
= OMP_CLAUSE_DECL (innerc
);
6335 n1
= force_gimple_operand_gsi (&gsi
, fold_convert (type
, n1
),
6336 true, NULL_TREE
, true, GSI_SAME_STMT
);
6337 n2
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, n2
),
6338 true, NULL_TREE
, true, GSI_SAME_STMT
);
6339 step
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, step
),
6340 true, NULL_TREE
, true, GSI_SAME_STMT
);
6342 = force_gimple_operand_gsi (&gsi
, fold_convert (itype
, fd
->chunk_size
),
6343 true, NULL_TREE
, true, GSI_SAME_STMT
);
6345 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
6346 t
= fold_build2 (PLUS_EXPR
, itype
, step
, t
);
6347 t
= fold_build2 (PLUS_EXPR
, itype
, t
, n2
);
6348 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
6349 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
6350 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
6351 fold_build1 (NEGATE_EXPR
, itype
, t
),
6352 fold_build1 (NEGATE_EXPR
, itype
, step
));
6354 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
6355 t
= fold_convert (itype
, t
);
6356 n
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6357 true, GSI_SAME_STMT
);
6359 trip_var
= create_tmp_reg (itype
, ".trip");
6360 if (gimple_in_ssa_p (cfun
))
6362 trip_init
= make_ssa_name (trip_var
, NULL
);
6363 trip_main
= make_ssa_name (trip_var
, NULL
);
6364 trip_back
= make_ssa_name (trip_var
, NULL
);
6368 trip_init
= trip_var
;
6369 trip_main
= trip_var
;
6370 trip_back
= trip_var
;
6373 stmt
= gimple_build_assign (trip_init
, build_int_cst (itype
, 0));
6374 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
6376 t
= fold_build2 (MULT_EXPR
, itype
, threadid
, fd
->chunk_size
);
6377 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6378 if (POINTER_TYPE_P (type
))
6379 t
= fold_build_pointer_plus (n1
, t
);
6381 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6382 vextra
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6383 true, GSI_SAME_STMT
);
6385 /* Remove the GIMPLE_OMP_FOR. */
6386 gsi_remove (&gsi
, true);
6388 /* Iteration space partitioning goes in ITER_PART_BB. */
6389 gsi
= gsi_last_bb (iter_part_bb
);
6391 t
= fold_build2 (MULT_EXPR
, itype
, trip_main
, nthreads
);
6392 t
= fold_build2 (PLUS_EXPR
, itype
, t
, threadid
);
6393 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->chunk_size
);
6394 s0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6395 false, GSI_CONTINUE_LINKING
);
6397 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, fd
->chunk_size
);
6398 t
= fold_build2 (MIN_EXPR
, itype
, t
, n
);
6399 e0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6400 false, GSI_CONTINUE_LINKING
);
6402 t
= build2 (LT_EXPR
, boolean_type_node
, s0
, n
);
6403 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_CONTINUE_LINKING
);
6405 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6406 gsi
= gsi_start_bb (seq_start_bb
);
6408 tree startvar
= fd
->loop
.v
;
6409 tree endvar
= NULL_TREE
;
6411 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6413 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
6414 ? gimple_omp_parallel_clauses (inner_stmt
)
6415 : gimple_omp_for_clauses (inner_stmt
);
6416 tree innerc
= find_omp_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
6417 gcc_assert (innerc
);
6418 startvar
= OMP_CLAUSE_DECL (innerc
);
6419 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6420 OMP_CLAUSE__LOOPTEMP_
);
6421 gcc_assert (innerc
);
6422 endvar
= OMP_CLAUSE_DECL (innerc
);
6425 t
= fold_convert (itype
, s0
);
6426 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6427 if (POINTER_TYPE_P (type
))
6428 t
= fold_build_pointer_plus (n1
, t
);
6430 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6431 t
= fold_convert (TREE_TYPE (startvar
), t
);
6432 t
= force_gimple_operand_gsi (&gsi
, t
,
6434 && TREE_ADDRESSABLE (startvar
),
6435 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
6436 stmt
= gimple_build_assign (startvar
, t
);
6437 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6439 t
= fold_convert (itype
, e0
);
6440 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
6441 if (POINTER_TYPE_P (type
))
6442 t
= fold_build_pointer_plus (n1
, t
);
6444 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
6445 t
= fold_convert (TREE_TYPE (startvar
), t
);
6446 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6447 false, GSI_CONTINUE_LINKING
);
6450 stmt
= gimple_build_assign (endvar
, e
);
6451 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6452 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (e
)))
6453 stmt
= gimple_build_assign (fd
->loop
.v
, e
);
6455 stmt
= gimple_build_assign_with_ops (NOP_EXPR
, fd
->loop
.v
, e
,
6457 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6459 if (fd
->collapse
> 1)
6460 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
/* The code controlling the sequential loop goes in CONT_BB,
   replacing the GIMPLE_OMP_CONTINUE.  */
6466 gsi
= gsi_last_bb (cont_bb
);
6467 stmt
= gsi_stmt (gsi
);
6468 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
6469 vmain
= gimple_omp_continue_control_use (stmt
);
6470 vback
= gimple_omp_continue_control_def (stmt
);
6472 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
6474 if (POINTER_TYPE_P (type
))
6475 t
= fold_build_pointer_plus (vmain
, step
);
6477 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, step
);
6478 if (DECL_P (vback
) && TREE_ADDRESSABLE (vback
))
6479 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6480 true, GSI_SAME_STMT
);
6481 stmt
= gimple_build_assign (vback
, t
);
6482 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
6484 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
6485 DECL_P (vback
) && TREE_ADDRESSABLE (vback
)
6487 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
6490 /* Remove GIMPLE_OMP_CONTINUE. */
6491 gsi_remove (&gsi
, true);
6493 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
6494 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, body_bb
);
6496 /* Trip update code goes into TRIP_UPDATE_BB. */
6497 gsi
= gsi_start_bb (trip_update_bb
);
6499 t
= build_int_cst (itype
, 1);
6500 t
= build2 (PLUS_EXPR
, itype
, trip_main
, t
);
6501 stmt
= gimple_build_assign (trip_back
, t
);
6502 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6505 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6506 gsi
= gsi_last_bb (exit_bb
);
6507 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
6509 t
= gimple_omp_return_lhs (gsi_stmt (gsi
));
6510 gsi_insert_after (&gsi
, build_omp_barrier (t
), GSI_SAME_STMT
);
6512 gsi_remove (&gsi
, true);
6514 /* Connect the new blocks. */
6515 find_edge (iter_part_bb
, seq_start_bb
)->flags
= EDGE_TRUE_VALUE
;
6516 find_edge (iter_part_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
6520 se
= find_edge (cont_bb
, body_bb
);
6521 if (gimple_omp_for_combined_p (fd
->for_stmt
))
6526 else if (fd
->collapse
> 1)
6529 se
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
6532 se
->flags
= EDGE_TRUE_VALUE
;
6533 find_edge (cont_bb
, trip_update_bb
)->flags
6534 = se
? EDGE_FALSE_VALUE
: EDGE_FALLTHRU
;
6536 redirect_edge_and_branch (single_succ_edge (trip_update_bb
), iter_part_bb
);
6539 if (gimple_in_ssa_p (cfun
))
6541 gimple_stmt_iterator psi
;
6544 edge_var_map_vector
*head
;
6548 gcc_assert (fd
->collapse
== 1 && !broken_loop
);
/* When we redirect the edge from trip_update_bb to iter_part_bb, we
   remove arguments of the phi nodes in fin_bb.  We need to create
   appropriate phi nodes in iter_part_bb instead.  */
6553 se
= single_pred_edge (fin_bb
);
6554 re
= single_succ_edge (trip_update_bb
);
6555 head
= redirect_edge_var_map_vector (re
);
6556 ene
= single_succ_edge (entry_bb
);
6558 psi
= gsi_start_phis (fin_bb
);
6559 for (i
= 0; !gsi_end_p (psi
) && head
->iterate (i
, &vm
);
6560 gsi_next (&psi
), ++i
)
6563 source_location locus
;
6565 phi
= gsi_stmt (psi
);
6566 t
= gimple_phi_result (phi
);
6567 gcc_assert (t
== redirect_edge_var_map_result (vm
));
6568 nphi
= create_phi_node (t
, iter_part_bb
);
6570 t
= PHI_ARG_DEF_FROM_EDGE (phi
, se
);
6571 locus
= gimple_phi_arg_location_from_edge (phi
, se
);
/* A special case -- fd->loop.v is not yet computed in
   iter_part_bb, we need to use vextra instead.  */
6575 if (t
== fd
->loop
.v
)
6577 add_phi_arg (nphi
, t
, ene
, locus
);
6578 locus
= redirect_edge_var_map_location (vm
);
6579 add_phi_arg (nphi
, redirect_edge_var_map_def (vm
), re
, locus
);
6581 gcc_assert (!gsi_end_p (psi
) && i
== head
->length ());
6582 redirect_edge_var_map_clear (re
);
6585 psi
= gsi_start_phis (fin_bb
);
6586 if (gsi_end_p (psi
))
6588 remove_phi_node (&psi
, false);
6591 /* Make phi node for trip. */
6592 phi
= create_phi_node (trip_main
, iter_part_bb
);
6593 add_phi_arg (phi
, trip_back
, single_succ_edge (trip_update_bb
),
6595 add_phi_arg (phi
, trip_init
, single_succ_edge (entry_bb
),
6600 set_immediate_dominator (CDI_DOMINATORS
, trip_update_bb
, cont_bb
);
6601 set_immediate_dominator (CDI_DOMINATORS
, iter_part_bb
,
6602 recompute_dominator (CDI_DOMINATORS
, iter_part_bb
));
6603 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
6604 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
6605 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
,
6606 recompute_dominator (CDI_DOMINATORS
, seq_start_bb
));
6607 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
6608 recompute_dominator (CDI_DOMINATORS
, body_bb
));
6612 struct loop
*trip_loop
= alloc_loop ();
6613 trip_loop
->header
= iter_part_bb
;
6614 trip_loop
->latch
= trip_update_bb
;
6615 add_loop (trip_loop
, iter_part_bb
->loop_father
);
6617 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
6619 struct loop
*loop
= alloc_loop ();
6620 loop
->header
= body_bb
;
6621 if (collapse_bb
== NULL
)
6622 loop
->latch
= cont_bb
;
6623 add_loop (loop
, trip_loop
);
/* A subroutine of expand_omp_for.  Generate code for a simd non-worksharing
   loop.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	...
	if (V cond N2) goto L0; else goto L2;
	...

   For collapsed loops, given parameters:

	for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	  for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	    for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	      BODY;

   we generate pseudocode

	...
	count3 = (adj + N32 - N31) / STEP3;
	...
	count2 = (adj + N22 - N21) / STEP2;
	...
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	...
	V2 += (V3 cond3 N32) ? 0 : STEP2;
	V3 = (V3 cond3 N32) ? V3 : N31;
	V1 += (V2 cond2 N22) ? 0 : STEP1;
	V2 = (V2 cond2 N22) ? V2 : N21;
	if (V < count) goto L0; else goto L2;  */
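/* For instance (illustration only, placeholder names): a loop such as

	#pragma omp simd safelen(8)
	for (i = 0; i < n; i++)
	  a[i] = b[i] + c[i];

   goes through this routine; the SAFELEN clause ends up in loop->safelen
   below, and the loop is additionally marked force_vectorize so the
   vectorizer considers it even when -ftree-loop-vectorize was not given
   explicitly.  a, b, c and n are placeholder user variables.  */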
6690 expand_omp_simd (struct omp_region
*region
, struct omp_for_data
*fd
)
6693 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, l2_bb
, l2_dom_bb
;
6694 gimple_stmt_iterator gsi
;
6696 bool broken_loop
= region
->cont
== NULL
;
6698 tree
*counts
= NULL
;
6700 tree safelen
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
6701 OMP_CLAUSE_SAFELEN
);
6702 tree simduid
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
6703 OMP_CLAUSE__SIMDUID_
);
6706 type
= TREE_TYPE (fd
->loop
.v
);
6707 entry_bb
= region
->entry
;
6708 cont_bb
= region
->cont
;
6709 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
6710 gcc_assert (broken_loop
6711 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
6712 l0_bb
= FALLTHRU_EDGE (entry_bb
)->dest
;
6715 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l0_bb
);
6716 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
6717 l1_bb
= split_block (cont_bb
, last_stmt (cont_bb
))->dest
;
6718 l2_bb
= BRANCH_EDGE (entry_bb
)->dest
;
6722 BRANCH_EDGE (entry_bb
)->flags
&= ~EDGE_ABNORMAL
;
6723 l1_bb
= split_edge (BRANCH_EDGE (entry_bb
));
6724 l2_bb
= single_succ (l1_bb
);
6726 exit_bb
= region
->exit
;
6729 gsi
= gsi_last_bb (entry_bb
);
6731 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
6732 /* Not needed in SSA form right now. */
6733 gcc_assert (!gimple_in_ssa_p (cfun
));
6734 if (fd
->collapse
> 1)
6736 int first_zero_iter
= -1;
6737 basic_block zero_iter_bb
= l2_bb
;
6739 counts
= XALLOCAVEC (tree
, fd
->collapse
);
6740 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
6741 zero_iter_bb
, first_zero_iter
,
6744 if (l2_dom_bb
== NULL
)
6749 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
6751 tree innerc
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
6752 OMP_CLAUSE__LOOPTEMP_
);
6753 gcc_assert (innerc
);
6754 n1
= OMP_CLAUSE_DECL (innerc
);
6755 innerc
= find_omp_clause (OMP_CLAUSE_CHAIN (innerc
),
6756 OMP_CLAUSE__LOOPTEMP_
);
6757 gcc_assert (innerc
);
6758 n2
= OMP_CLAUSE_DECL (innerc
);
6759 expand_omp_build_assign (&gsi
, fd
->loop
.v
,
6760 fold_convert (type
, n1
));
6761 if (fd
->collapse
> 1)
6764 expand_omp_for_init_vars (fd
, &gsi
, counts
, NULL
, n1
);
6770 expand_omp_build_assign (&gsi
, fd
->loop
.v
,
6771 fold_convert (type
, fd
->loop
.n1
));
6772 if (fd
->collapse
> 1)
6773 for (i
= 0; i
< fd
->collapse
; i
++)
6775 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
6776 if (POINTER_TYPE_P (itype
))
6777 itype
= signed_type_for (itype
);
6778 t
= fold_convert (TREE_TYPE (fd
->loops
[i
].v
), fd
->loops
[i
].n1
);
6779 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
6783 /* Remove the GIMPLE_OMP_FOR statement. */
6784 gsi_remove (&gsi
, true);
6788 /* Code to control the increment goes in the CONT_BB. */
6789 gsi
= gsi_last_bb (cont_bb
);
6790 stmt
= gsi_stmt (gsi
);
6791 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
6793 if (POINTER_TYPE_P (type
))
6794 t
= fold_build_pointer_plus (fd
->loop
.v
, fd
->loop
.step
);
6796 t
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.v
, fd
->loop
.step
);
6797 expand_omp_build_assign (&gsi
, fd
->loop
.v
, t
);
6799 if (fd
->collapse
> 1)
6801 i
= fd
->collapse
- 1;
6802 if (POINTER_TYPE_P (TREE_TYPE (fd
->loops
[i
].v
)))
6804 t
= fold_convert (sizetype
, fd
->loops
[i
].step
);
6805 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, t
);
6809 t
= fold_convert (TREE_TYPE (fd
->loops
[i
].v
),
6811 t
= fold_build2 (PLUS_EXPR
, TREE_TYPE (fd
->loops
[i
].v
),
6814 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
6816 for (i
= fd
->collapse
- 1; i
> 0; i
--)
6818 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
6819 tree itype2
= TREE_TYPE (fd
->loops
[i
- 1].v
);
6820 if (POINTER_TYPE_P (itype2
))
6821 itype2
= signed_type_for (itype2
);
6822 t
= build3 (COND_EXPR
, itype2
,
6823 build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
6825 fold_convert (itype
, fd
->loops
[i
].n2
)),
6826 build_int_cst (itype2
, 0),
6827 fold_convert (itype2
, fd
->loops
[i
- 1].step
));
6828 if (POINTER_TYPE_P (TREE_TYPE (fd
->loops
[i
- 1].v
)))
6829 t
= fold_build_pointer_plus (fd
->loops
[i
- 1].v
, t
);
6831 t
= fold_build2 (PLUS_EXPR
, itype2
, fd
->loops
[i
- 1].v
, t
);
6832 expand_omp_build_assign (&gsi
, fd
->loops
[i
- 1].v
, t
);
6834 t
= build3 (COND_EXPR
, itype
,
6835 build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
6837 fold_convert (itype
, fd
->loops
[i
].n2
)),
6839 fold_convert (itype
, fd
->loops
[i
].n1
));
6840 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
6844 /* Remove GIMPLE_OMP_CONTINUE. */
6845 gsi_remove (&gsi
, true);
6848 /* Emit the condition in L1_BB. */
6849 gsi
= gsi_start_bb (l1_bb
);
6851 t
= fold_convert (type
, n2
);
6852 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
6853 false, GSI_CONTINUE_LINKING
);
6854 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, fd
->loop
.v
, t
);
6855 stmt
= gimple_build_cond_empty (t
);
6856 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
6857 if (walk_tree (gimple_cond_lhs_ptr (stmt
), expand_omp_regimplify_p
,
6859 || walk_tree (gimple_cond_rhs_ptr (stmt
), expand_omp_regimplify_p
,
6862 gsi
= gsi_for_stmt (stmt
);
6863 gimple_regimplify_operands (stmt
, &gsi
);
6866 /* Remove GIMPLE_OMP_RETURN. */
6867 gsi
= gsi_last_bb (exit_bb
);
6868 gsi_remove (&gsi
, true);
6870 /* Connect the new blocks. */
6871 remove_edge (FALLTHRU_EDGE (entry_bb
));
6875 remove_edge (BRANCH_EDGE (entry_bb
));
6876 make_edge (entry_bb
, l1_bb
, EDGE_FALLTHRU
);
6878 e
= BRANCH_EDGE (l1_bb
);
6879 ne
= FALLTHRU_EDGE (l1_bb
);
6880 e
->flags
= EDGE_TRUE_VALUE
;
6884 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
6886 ne
= single_succ_edge (l1_bb
);
6887 e
= make_edge (l1_bb
, l0_bb
, EDGE_TRUE_VALUE
);
6890 ne
->flags
= EDGE_FALSE_VALUE
;
6891 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
6892 ne
->probability
= REG_BR_PROB_BASE
/ 8;
6894 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
, entry_bb
);
6895 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
, l2_dom_bb
);
6896 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
, l1_bb
);
6900 struct loop
*loop
= alloc_loop ();
6901 loop
->header
= l1_bb
;
6902 loop
->latch
= cont_bb
;
6903 add_loop (loop
, l1_bb
->loop_father
);
6904 if (safelen
== NULL_TREE
)
6905 loop
->safelen
= INT_MAX
;
6908 safelen
= OMP_CLAUSE_SAFELEN_EXPR (safelen
);
6909 if (TREE_CODE (safelen
) != INTEGER_CST
)
6911 else if (!tree_fits_uhwi_p (safelen
)
6912 || tree_to_uhwi (safelen
) > INT_MAX
)
6913 loop
->safelen
= INT_MAX
;
6915 loop
->safelen
= tree_to_uhwi (safelen
);
6916 if (loop
->safelen
== 1)
6921 loop
->simduid
= OMP_CLAUSE__SIMDUID__DECL (simduid
);
6922 cfun
->has_simduid_loops
= true;
/* If not -fno-tree-loop-vectorize, hint that we want to vectorize
   the loop.  */
6926 if ((flag_tree_loop_vectorize
6927 || (!global_options_set
.x_flag_tree_loop_vectorize
6928 && !global_options_set
.x_flag_tree_vectorize
))
6929 && flag_tree_loop_optimize
6930 && loop
->safelen
> 1)
6932 loop
->force_vectorize
= true;
6933 cfun
->has_force_vectorize_loops
= true;
6939 /* Expand the OpenMP loop defined by REGION. */
6942 expand_omp_for (struct omp_region
*region
, gimple inner_stmt
)
6944 struct omp_for_data fd
;
6945 struct omp_for_data_loop
*loops
;
6948 = (struct omp_for_data_loop
*)
6949 alloca (gimple_omp_for_collapse (last_stmt (region
->entry
))
6950 * sizeof (struct omp_for_data_loop
));
6951 extract_omp_for_data (last_stmt (region
->entry
), &fd
, loops
);
6952 region
->sched_kind
= fd
.sched_kind
;
6954 gcc_assert (EDGE_COUNT (region
->entry
->succs
) == 2);
6955 BRANCH_EDGE (region
->entry
)->flags
&= ~EDGE_ABNORMAL
;
6956 FALLTHRU_EDGE (region
->entry
)->flags
&= ~EDGE_ABNORMAL
;
6959 gcc_assert (EDGE_COUNT (region
->cont
->succs
) == 2);
6960 BRANCH_EDGE (region
->cont
)->flags
&= ~EDGE_ABNORMAL
;
6961 FALLTHRU_EDGE (region
->cont
)->flags
&= ~EDGE_ABNORMAL
;
/* If there isn't a continue then this is a degenerate case where
   the introduction of abnormal edges during lowering will prevent
   original loops from being detected.  Fix that up.  */
6967 loops_state_set (LOOPS_NEED_FIXUP
);
6969 if (gimple_omp_for_kind (fd
.for_stmt
) & GF_OMP_FOR_SIMD
)
6970 expand_omp_simd (region
, &fd
);
6971 else if (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
6972 && !fd
.have_ordered
)
6974 if (fd
.chunk_size
== NULL
)
6975 expand_omp_for_static_nochunk (region
, &fd
, inner_stmt
);
6977 expand_omp_for_static_chunk (region
, &fd
, inner_stmt
);
6981 int fn_index
, start_ix
, next_ix
;
6983 gcc_assert (gimple_omp_for_kind (fd
.for_stmt
)
6984 == GF_OMP_FOR_KIND_FOR
);
6985 if (fd
.chunk_size
== NULL
6986 && fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
)
6987 fd
.chunk_size
= integer_zero_node
;
6988 gcc_assert (fd
.sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
6989 fn_index
= (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
)
6990 ? 3 : fd
.sched_kind
;
6991 fn_index
+= fd
.have_ordered
* 4;
6992 start_ix
= ((int)BUILT_IN_GOMP_LOOP_STATIC_START
) + fn_index
;
6993 next_ix
= ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT
) + fn_index
;
6994 if (fd
.iter_type
== long_long_unsigned_type_node
)
6996 start_ix
+= ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
6997 - (int)BUILT_IN_GOMP_LOOP_STATIC_START
);
6998 next_ix
+= ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
6999 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT
);
7001 expand_omp_for_generic (region
, &fd
, (enum built_in_function
) start_ix
,
7002 (enum built_in_function
) next_ix
, inner_stmt
);
7005 if (gimple_in_ssa_p (cfun
))
7006 update_ssa (TODO_update_ssa_only_virtuals
);
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
	...
	v = GOMP_sections_next ();
	...

   If this is a combined parallel sections, replace the call to
   GOMP_sections_start with a call to GOMP_sections_next.  */
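/* For example (illustration only, placeholder names): with an input like

	#pragma omp sections
	{
	  #pragma omp section
	    foo ();
	  #pragma omp section
	    bar ();
	}

   GOMP_sections_start (2) returns the index of the section to execute,
   the switch built below dispatches on that index, and GOMP_sections_next ()
   is called until it returns 0 (the 'end of work' case).  foo and bar are
   placeholder user functions.  */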
7038 expand_omp_sections (struct omp_region
*region
)
7040 tree t
, u
, vin
= NULL
, vmain
, vnext
, l2
;
7042 basic_block entry_bb
, l0_bb
, l1_bb
, l2_bb
, default_bb
;
7043 gimple_stmt_iterator si
, switch_si
;
7044 gimple sections_stmt
, stmt
, cont
;
7047 struct omp_region
*inner
;
7049 bool exit_reachable
= region
->cont
!= NULL
;
7051 gcc_assert (region
->exit
!= NULL
);
7052 entry_bb
= region
->entry
;
7053 l0_bb
= single_succ (entry_bb
);
7054 l1_bb
= region
->cont
;
7055 l2_bb
= region
->exit
;
7056 if (single_pred_p (l2_bb
) && single_pred (l2_bb
) == l0_bb
)
7057 l2
= gimple_block_label (l2_bb
);
7060 /* This can happen if there are reductions. */
7061 len
= EDGE_COUNT (l0_bb
->succs
);
7062 gcc_assert (len
> 0);
7063 e
= EDGE_SUCC (l0_bb
, len
- 1);
7064 si
= gsi_last_bb (e
->dest
);
7067 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
7068 l2
= gimple_block_label (e
->dest
);
7070 FOR_EACH_EDGE (e
, ei
, l0_bb
->succs
)
7072 si
= gsi_last_bb (e
->dest
);
7074 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
7076 l2
= gimple_block_label (e
->dest
);
7082 default_bb
= create_empty_bb (l1_bb
->prev_bb
);
7084 default_bb
= create_empty_bb (l0_bb
);
/* We will build a switch() with enough cases for all the
   GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
   and a default case to abort if something goes wrong.  */
7089 len
= EDGE_COUNT (l0_bb
->succs
);
/* Use vec::quick_push on label_vec throughout, since we know the size
   exactly.  */
7093 auto_vec
<tree
> label_vec (len
);
7095 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7096 GIMPLE_OMP_SECTIONS statement. */
7097 si
= gsi_last_bb (entry_bb
);
7098 sections_stmt
= gsi_stmt (si
);
7099 gcc_assert (gimple_code (sections_stmt
) == GIMPLE_OMP_SECTIONS
);
7100 vin
= gimple_omp_sections_control (sections_stmt
);
7101 if (!is_combined_parallel (region
))
/* If we are not inside a combined parallel+sections region,
   call GOMP_sections_start.  */
7105 t
= build_int_cst (unsigned_type_node
, len
- 1);
7106 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START
);
7107 stmt
= gimple_build_call (u
, 1, t
);
7111 /* Otherwise, call GOMP_sections_next. */
7112 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
7113 stmt
= gimple_build_call (u
, 0);
7115 gimple_call_set_lhs (stmt
, vin
);
7116 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
7117 gsi_remove (&si
, true);
/* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
   L0_BB.  */
7121 switch_si
= gsi_last_bb (l0_bb
);
7122 gcc_assert (gimple_code (gsi_stmt (switch_si
)) == GIMPLE_OMP_SECTIONS_SWITCH
);
7125 cont
= last_stmt (l1_bb
);
7126 gcc_assert (gimple_code (cont
) == GIMPLE_OMP_CONTINUE
);
7127 vmain
= gimple_omp_continue_control_use (cont
);
7128 vnext
= gimple_omp_continue_control_def (cont
);
7136 t
= build_case_label (build_int_cst (unsigned_type_node
, 0), NULL
, l2
);
7137 label_vec
.quick_push (t
);
7140 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7141 for (inner
= region
->inner
, casei
= 1;
7143 inner
= inner
->next
, i
++, casei
++)
7145 basic_block s_entry_bb
, s_exit_bb
;
7147 /* Skip optional reduction region. */
7148 if (inner
->type
== GIMPLE_OMP_ATOMIC_LOAD
)
7155 s_entry_bb
= inner
->entry
;
7156 s_exit_bb
= inner
->exit
;
7158 t
= gimple_block_label (s_entry_bb
);
7159 u
= build_int_cst (unsigned_type_node
, casei
);
7160 u
= build_case_label (u
, NULL
, t
);
7161 label_vec
.quick_push (u
);
7163 si
= gsi_last_bb (s_entry_bb
);
7164 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SECTION
);
7165 gcc_assert (i
< len
|| gimple_omp_section_last_p (gsi_stmt (si
)));
7166 gsi_remove (&si
, true);
7167 single_succ_edge (s_entry_bb
)->flags
= EDGE_FALLTHRU
;
7169 if (s_exit_bb
== NULL
)
7172 si
= gsi_last_bb (s_exit_bb
);
7173 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
7174 gsi_remove (&si
, true);
7176 single_succ_edge (s_exit_bb
)->flags
= EDGE_FALLTHRU
;
7179 /* Error handling code goes in DEFAULT_BB. */
7180 t
= gimple_block_label (default_bb
);
7181 u
= build_case_label (NULL
, NULL
, t
);
7182 make_edge (l0_bb
, default_bb
, 0);
7183 add_bb_to_loop (default_bb
, current_loops
->tree_root
);
7185 stmt
= gimple_build_switch (vmain
, u
, label_vec
);
7186 gsi_insert_after (&switch_si
, stmt
, GSI_SAME_STMT
);
7187 gsi_remove (&switch_si
, true);
7189 si
= gsi_start_bb (default_bb
);
7190 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP
), 0);
7191 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
7197 /* Code to get the next section goes in L1_BB. */
7198 si
= gsi_last_bb (l1_bb
);
7199 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CONTINUE
);
7201 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
7202 stmt
= gimple_build_call (bfn_decl
, 0);
7203 gimple_call_set_lhs (stmt
, vnext
);
7204 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
7205 gsi_remove (&si
, true);
7207 single_succ_edge (l1_bb
)->flags
= EDGE_FALLTHRU
;
7210 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7211 si
= gsi_last_bb (l2_bb
);
7212 if (gimple_omp_return_nowait_p (gsi_stmt (si
)))
7213 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT
);
7214 else if (gimple_omp_return_lhs (gsi_stmt (si
)))
7215 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL
);
7217 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END
);
7218 stmt
= gimple_build_call (t
, 0);
7219 if (gimple_omp_return_lhs (gsi_stmt (si
)))
7220 gimple_call_set_lhs (stmt
, gimple_omp_return_lhs (gsi_stmt (si
)));
7221 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
7222 gsi_remove (&si
, true);
7224 set_immediate_dominator (CDI_DOMINATORS
, default_bb
, l0_bb
);
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */
7232 expand_omp_single (struct omp_region
*region
)
7234 basic_block entry_bb
, exit_bb
;
7235 gimple_stmt_iterator si
;
7237 entry_bb
= region
->entry
;
7238 exit_bb
= region
->exit
;
7240 si
= gsi_last_bb (entry_bb
);
7241 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SINGLE
);
7242 gsi_remove (&si
, true);
7243 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
7245 si
= gsi_last_bb (exit_bb
);
7246 if (!gimple_omp_return_nowait_p (gsi_stmt (si
)))
7248 tree t
= gimple_omp_return_lhs (gsi_stmt (si
));
7249 gsi_insert_after (&si
, build_omp_barrier (t
), GSI_SAME_STMT
);
7251 gsi_remove (&si
, true);
7252 single_succ_edge (exit_bb
)->flags
= EDGE_FALLTHRU
;
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */
7261 expand_omp_synch (struct omp_region
*region
)
7263 basic_block entry_bb
, exit_bb
;
7264 gimple_stmt_iterator si
;
7266 entry_bb
= region
->entry
;
7267 exit_bb
= region
->exit
;
7269 si
= gsi_last_bb (entry_bb
);
7270 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SINGLE
7271 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_MASTER
7272 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_TASKGROUP
7273 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ORDERED
7274 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CRITICAL
7275 || gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_TEAMS
);
7276 gsi_remove (&si
, true);
7277 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
7281 si
= gsi_last_bb (exit_bb
);
7282 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
7283 gsi_remove (&si
, true);
7284 single_succ_edge (exit_bb
)->flags
= EDGE_FALLTHRU
;
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */
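/* For example (illustration only): an atomic read

	#pragma omp atomic read
	v = x;

   can be expanded by this routine into roughly

	v = __atomic_load_n (&x, MEMMODEL_RELAXED);

   (or MEMMODEL_SEQ_CST when the seq_cst clause is present), assuming the
   target provides the __atomic_load_N builtin for the access size.  v and x
   are placeholder user variables.  */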
7292 expand_omp_atomic_load (basic_block load_bb
, tree addr
,
7293 tree loaded_val
, int index
)
7295 enum built_in_function tmpbase
;
7296 gimple_stmt_iterator gsi
;
7297 basic_block store_bb
;
7300 tree decl
, call
, type
, itype
;
7302 gsi
= gsi_last_bb (load_bb
);
7303 stmt
= gsi_stmt (gsi
);
7304 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
7305 loc
= gimple_location (stmt
);
/* ??? If the target does not implement atomic_load_optab[mode], and mode
   is smaller than word size, then expand_atomic_load assumes that the load
   is atomic.  We could avoid the builtin entirely in this case.  */
7311 tmpbase
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
7312 decl
= builtin_decl_explicit (tmpbase
);
7313 if (decl
== NULL_TREE
)
7316 type
= TREE_TYPE (loaded_val
);
7317 itype
= TREE_TYPE (TREE_TYPE (decl
));
7319 call
= build_call_expr_loc (loc
, decl
, 2, addr
,
7320 build_int_cst (NULL
,
7321 gimple_omp_atomic_seq_cst_p (stmt
)
7323 : MEMMODEL_RELAXED
));
7324 if (!useless_type_conversion_p (type
, itype
))
7325 call
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
7326 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
7328 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
7329 gsi_remove (&gsi
, true);
7331 store_bb
= single_succ (load_bb
);
7332 gsi
= gsi_last_bb (store_bb
);
7333 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
7334 gsi_remove (&gsi
, true);
7336 if (gimple_in_ssa_p (cfun
))
7337 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */
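/* For example (illustration only): an atomic write

	#pragma omp atomic write
	x = expr;

   may be expanded here into roughly

	__atomic_store_n (&x, expr, MEMMODEL_RELAXED);

   and an atomic capture whose old value is needed uses the exchange form
   (__atomic_exchange_n) instead, as selected below.  x and expr are
   placeholders for the user's operands.  */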
7346 expand_omp_atomic_store (basic_block load_bb
, tree addr
,
7347 tree loaded_val
, tree stored_val
, int index
)
7349 enum built_in_function tmpbase
;
7350 gimple_stmt_iterator gsi
;
7351 basic_block store_bb
= single_succ (load_bb
);
7354 tree decl
, call
, type
, itype
;
7355 enum machine_mode imode
;
7358 gsi
= gsi_last_bb (load_bb
);
7359 stmt
= gsi_stmt (gsi
);
7360 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
7362 /* If the load value is needed, then this isn't a store but an exchange. */
7363 exchange
= gimple_omp_atomic_need_value_p (stmt
);
7365 gsi
= gsi_last_bb (store_bb
);
7366 stmt
= gsi_stmt (gsi
);
7367 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_STORE
);
7368 loc
= gimple_location (stmt
);
/* ??? If the target does not implement atomic_store_optab[mode], and mode
   is smaller than word size, then expand_atomic_store assumes that the store
   is atomic.  We could avoid the builtin entirely in this case.  */
7374 tmpbase
= (exchange
? BUILT_IN_ATOMIC_EXCHANGE_N
: BUILT_IN_ATOMIC_STORE_N
);
7375 tmpbase
= (enum built_in_function
) ((int) tmpbase
+ index
+ 1);
7376 decl
= builtin_decl_explicit (tmpbase
);
7377 if (decl
== NULL_TREE
)
7380 type
= TREE_TYPE (stored_val
);
7382 /* Dig out the type of the function's second argument. */
7383 itype
= TREE_TYPE (decl
);
7384 itype
= TYPE_ARG_TYPES (itype
);
7385 itype
= TREE_CHAIN (itype
);
7386 itype
= TREE_VALUE (itype
);
7387 imode
= TYPE_MODE (itype
);
7389 if (exchange
&& !can_atomic_exchange_p (imode
, true))
7392 if (!useless_type_conversion_p (itype
, type
))
7393 stored_val
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, itype
, stored_val
);
7394 call
= build_call_expr_loc (loc
, decl
, 3, addr
, stored_val
,
7395 build_int_cst (NULL
,
7396 gimple_omp_atomic_seq_cst_p (stmt
)
7398 : MEMMODEL_RELAXED
));
7401 if (!useless_type_conversion_p (type
, itype
))
7402 call
= build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
7403 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
7406 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
7407 gsi_remove (&gsi
, true);
7409 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7410 gsi
= gsi_last_bb (load_bb
);
7411 gsi_remove (&gsi
, true);
7413 if (gimple_in_ssa_p (cfun
))
7414 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */
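/* For example (illustration only): an update such as

	#pragma omp atomic
	x += 1;

   matches the pattern recognized below and can be expanded into roughly

	__atomic_fetch_add (&x, 1, MEMMODEL_RELAXED);

   with the __atomic_add_fetch style (new-value) builtins selected instead
   when the new value of the operation is needed.  x is a placeholder user
   variable.  */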
7425 expand_omp_atomic_fetch_op (basic_block load_bb
,
7426 tree addr
, tree loaded_val
,
7427 tree stored_val
, int index
)
7429 enum built_in_function oldbase
, newbase
, tmpbase
;
7430 tree decl
, itype
, call
;
7432 basic_block store_bb
= single_succ (load_bb
);
7433 gimple_stmt_iterator gsi
;
7436 enum tree_code code
;
7437 bool need_old
, need_new
;
7438 enum machine_mode imode
;
/* We expect to find the following sequences:

   load_bb:
     GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
     val = tmp OP something; (or: something OP tmp)
     GIMPLE_OMP_STORE (val)

   ???FIXME: Allow a more flexible sequence.
   Perhaps use data flow to pick the statements.  */
7455 gsi
= gsi_after_labels (store_bb
);
7456 stmt
= gsi_stmt (gsi
);
7457 loc
= gimple_location (stmt
);
7458 if (!is_gimple_assign (stmt
))
7461 if (gimple_code (gsi_stmt (gsi
)) != GIMPLE_OMP_ATOMIC_STORE
)
7463 need_new
= gimple_omp_atomic_need_value_p (gsi_stmt (gsi
));
7464 need_old
= gimple_omp_atomic_need_value_p (last_stmt (load_bb
));
7465 seq_cst
= gimple_omp_atomic_seq_cst_p (last_stmt (load_bb
));
7466 gcc_checking_assert (!need_old
|| !need_new
);
7468 if (!operand_equal_p (gimple_assign_lhs (stmt
), stored_val
, 0))
7471 /* Check for one of the supported fetch-op operations. */
7472 code
= gimple_assign_rhs_code (stmt
);
7476 case POINTER_PLUS_EXPR
:
7477 oldbase
= BUILT_IN_ATOMIC_FETCH_ADD_N
;
7478 newbase
= BUILT_IN_ATOMIC_ADD_FETCH_N
;
7481 oldbase
= BUILT_IN_ATOMIC_FETCH_SUB_N
;
7482 newbase
= BUILT_IN_ATOMIC_SUB_FETCH_N
;
7485 oldbase
= BUILT_IN_ATOMIC_FETCH_AND_N
;
7486 newbase
= BUILT_IN_ATOMIC_AND_FETCH_N
;
7489 oldbase
= BUILT_IN_ATOMIC_FETCH_OR_N
;
7490 newbase
= BUILT_IN_ATOMIC_OR_FETCH_N
;
7493 oldbase
= BUILT_IN_ATOMIC_FETCH_XOR_N
;
7494 newbase
= BUILT_IN_ATOMIC_XOR_FETCH_N
;
7500 /* Make sure the expression is of the proper form. */
7501 if (operand_equal_p (gimple_assign_rhs1 (stmt
), loaded_val
, 0))
7502 rhs
= gimple_assign_rhs2 (stmt
);
7503 else if (commutative_tree_code (gimple_assign_rhs_code (stmt
))
7504 && operand_equal_p (gimple_assign_rhs2 (stmt
), loaded_val
, 0))
7505 rhs
= gimple_assign_rhs1 (stmt
);
7509 tmpbase
= ((enum built_in_function
)
7510 ((need_new
? newbase
: oldbase
) + index
+ 1));
7511 decl
= builtin_decl_explicit (tmpbase
);
7512 if (decl
== NULL_TREE
)
7514 itype
= TREE_TYPE (TREE_TYPE (decl
));
7515 imode
= TYPE_MODE (itype
);
/* We could test all of the various optabs involved, but the fact of the
   matter is that (with the exception of i486 vs i586 and xadd) all targets
   that support any atomic operation optab also implement compare-and-swap.
   Let optabs.c take care of expanding any compare-and-swap loop.  */
7521 if (!can_compare_and_swap_p (imode
, true))
7524 gsi
= gsi_last_bb (load_bb
);
7525 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_LOAD
);
/* OpenMP does not imply any barrier-like semantics on its atomic ops.
   It only requires that the operation happen atomically.  Thus we can
   use the RELAXED memory model.  */
7530 call
= build_call_expr_loc (loc
, decl
, 3, addr
,
7531 fold_convert_loc (loc
, itype
, rhs
),
7532 build_int_cst (NULL
,
7533 seq_cst
? MEMMODEL_SEQ_CST
7534 : MEMMODEL_RELAXED
));
7536 if (need_old
|| need_new
)
7538 lhs
= need_old
? loaded_val
: stored_val
;
7539 call
= fold_convert_loc (loc
, TREE_TYPE (lhs
), call
);
7540 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, lhs
, call
);
7543 call
= fold_convert_loc (loc
, void_type_node
, call
);
7544 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
7545 gsi_remove (&gsi
, true);
7547 gsi
= gsi_last_bb (store_bb
);
7548 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
7549 gsi_remove (&gsi
, true);
7550 gsi
= gsi_last_bb (store_bb
);
7551 gsi_remove (&gsi
, true);
7553 if (gimple_in_ssa_p (cfun
))
7554 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */
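/* For instance (illustration only): a floating-point update

	#pragma omp atomic
	f *= 2.0f;

   has no fetch-op builtin, so the blocks built below behave roughly like

	oldbits = load f viewed as a same-sized integer;
      repeat:
	newbits = bits of (VIEW_CONVERT (float, oldbits) * 2.0f);
	curbits = __sync_val_compare_and_swap (addr, oldbits, newbits);
	if (curbits != oldbits) { oldbits = curbits; goto repeat; }

   i.e. the value is view-converted to an integer of the same size so the
   compare-and-swap and the retry comparison work even for NaNs and -0.0.
   f is a placeholder user variable.  */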
7572 expand_omp_atomic_pipeline (basic_block load_bb
, basic_block store_bb
,
7573 tree addr
, tree loaded_val
, tree stored_val
,
7576 tree loadedi
, storedi
, initial
, new_storedi
, old_vali
;
7577 tree type
, itype
, cmpxchg
, iaddr
;
7578 gimple_stmt_iterator si
;
7579 basic_block loop_header
= single_succ (load_bb
);
7582 enum built_in_function fncode
;
7584 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7585 order to use the RELAXED memory model effectively. */
7586 fncode
= (enum built_in_function
)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7588 cmpxchg
= builtin_decl_explicit (fncode
);
7589 if (cmpxchg
== NULL_TREE
)
7591 type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
7592 itype
= TREE_TYPE (TREE_TYPE (cmpxchg
));
7594 if (!can_compare_and_swap_p (TYPE_MODE (itype
), true))
7597 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7598 si
= gsi_last_bb (load_bb
);
7599 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
/* For floating-point values, we'll need to view-convert them to integers
   so that we can perform the atomic compare and swap.  Simplify the
   following code by always setting up the "i"ntegral variables.  */
7604 if (!INTEGRAL_TYPE_P (type
) && !POINTER_TYPE_P (type
))
7608 iaddr
= create_tmp_reg (build_pointer_type_for_mode (itype
, ptr_mode
,
7611 = force_gimple_operand_gsi (&si
,
7612 fold_convert (TREE_TYPE (iaddr
), addr
),
7613 false, NULL_TREE
, true, GSI_SAME_STMT
);
7614 stmt
= gimple_build_assign (iaddr
, iaddr_val
);
7615 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
7616 loadedi
= create_tmp_var (itype
, NULL
);
7617 if (gimple_in_ssa_p (cfun
))
7618 loadedi
= make_ssa_name (loadedi
, NULL
);
7623 loadedi
= loaded_val
;
7626 fncode
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
7627 tree loaddecl
= builtin_decl_explicit (fncode
);
7630 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr
)),
7631 build_call_expr (loaddecl
, 2, iaddr
,
7632 build_int_cst (NULL_TREE
,
7633 MEMMODEL_RELAXED
)));
7635 initial
= build2 (MEM_REF
, TREE_TYPE (TREE_TYPE (iaddr
)), iaddr
,
7636 build_int_cst (TREE_TYPE (iaddr
), 0));
7639 = force_gimple_operand_gsi (&si
, initial
, true, NULL_TREE
, true,
7642 /* Move the value to the LOADEDI temporary. */
7643 if (gimple_in_ssa_p (cfun
))
7645 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header
)));
7646 phi
= create_phi_node (loadedi
, loop_header
);
7647 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, single_succ_edge (load_bb
)),
7651 gsi_insert_before (&si
,
7652 gimple_build_assign (loadedi
, initial
),
7654 if (loadedi
!= loaded_val
)
7656 gimple_stmt_iterator gsi2
;
7659 x
= build1 (VIEW_CONVERT_EXPR
, type
, loadedi
);
7660 gsi2
= gsi_start_bb (loop_header
);
7661 if (gimple_in_ssa_p (cfun
))
7664 x
= force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
7665 true, GSI_SAME_STMT
);
7666 stmt
= gimple_build_assign (loaded_val
, x
);
7667 gsi_insert_before (&gsi2
, stmt
, GSI_SAME_STMT
);
7671 x
= build2 (MODIFY_EXPR
, TREE_TYPE (loaded_val
), loaded_val
, x
);
7672 force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
7673 true, GSI_SAME_STMT
);
7676 gsi_remove (&si
, true);
7678 si
= gsi_last_bb (store_bb
);
7679 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
7682 storedi
= stored_val
;
7685 force_gimple_operand_gsi (&si
,
7686 build1 (VIEW_CONVERT_EXPR
, itype
,
7687 stored_val
), true, NULL_TREE
, true,
7690 /* Build the compare&swap statement. */
7691 new_storedi
= build_call_expr (cmpxchg
, 3, iaddr
, loadedi
, storedi
);
7692 new_storedi
= force_gimple_operand_gsi (&si
,
7693 fold_convert (TREE_TYPE (loadedi
),
7696 true, GSI_SAME_STMT
);
7698 if (gimple_in_ssa_p (cfun
))
7702 old_vali
= create_tmp_var (TREE_TYPE (loadedi
), NULL
);
7703 stmt
= gimple_build_assign (old_vali
, loadedi
);
7704 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
7706 stmt
= gimple_build_assign (loadedi
, new_storedi
);
7707 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
/* Note that we always perform the comparison as an integer, even for
   floating point.  This allows the atomic operation to properly
   succeed even with NaNs and -0.0.  */
7713 stmt
= gimple_build_cond_empty
7714 (build2 (NE_EXPR
, boolean_type_node
,
7715 new_storedi
, old_vali
));
7716 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
7719 e
= single_succ_edge (store_bb
);
7720 e
->flags
&= ~EDGE_FALLTHRU
;
7721 e
->flags
|= EDGE_FALSE_VALUE
;
7723 e
= make_edge (store_bb
, loop_header
, EDGE_TRUE_VALUE
);
7725 /* Copy the new value to loadedi (we already did that before the condition
7726 if we are not in SSA). */
7727 if (gimple_in_ssa_p (cfun
))
7729 phi
= gimple_seq_first_stmt (phi_nodes (loop_header
));
7730 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), new_storedi
);
7733 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
7734 gsi_remove (&si
, true);
7736 struct loop
*loop
= alloc_loop ();
7737 loop
->header
= loop_header
;
7738 loop
->latch
= store_bb
;
7739 add_loop (loop
, loop_header
->loop_father
);
7741 if (gimple_in_ssa_p (cfun
))
7742 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      GOMP_atomic_start ();
      *addr = rhs;
      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace

      GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
	loaded_val = *addr;

   and

      GIMPLE_OMP_ATOMIC_STORE (stored_val) with
	*addr = stored_val;  */
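/* For example (illustration only): when neither the fetch-op nor the
   compare-and-swap expansion applies, an update like

	#pragma omp atomic
	x = x + y;

   ends up protected by the global runtime lock:

	GOMP_atomic_start ();
	x = x + y;
	GOMP_atomic_end ();

   x and y are placeholders for the user's operands.  */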
7772 expand_omp_atomic_mutex (basic_block load_bb
, basic_block store_bb
,
7773 tree addr
, tree loaded_val
, tree stored_val
)
7775 gimple_stmt_iterator si
;
7779 si
= gsi_last_bb (load_bb
);
7780 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
7782 t
= builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
);
7783 t
= build_call_expr (t
, 0);
7784 force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
7786 stmt
= gimple_build_assign (loaded_val
, build_simple_mem_ref (addr
));
7787 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
7788 gsi_remove (&si
, true);
7790 si
= gsi_last_bb (store_bb
);
7791 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
7793 stmt
= gimple_build_assign (build_simple_mem_ref (unshare_expr (addr
)),
7795 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
7797 t
= builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
);
7798 t
= build_call_expr (t
, 0);
7799 force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
7800 gsi_remove (&si
, true);
7802 if (gimple_in_ssa_p (cfun
))
7803 update_ssa (TODO_update_ssa_no_phi
);
/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand
   using expand_omp_atomic_fetch_op.  If that fails, we try
   expand_omp_atomic_pipeline, and if that fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */
7815 expand_omp_atomic (struct omp_region
*region
)
7817 basic_block load_bb
= region
->entry
, store_bb
= region
->exit
;
7818 gimple load
= last_stmt (load_bb
), store
= last_stmt (store_bb
);
7819 tree loaded_val
= gimple_omp_atomic_load_lhs (load
);
7820 tree addr
= gimple_omp_atomic_load_rhs (load
);
7821 tree stored_val
= gimple_omp_atomic_store_val (store
);
7822 tree type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
7823 HOST_WIDE_INT index
;
7825 /* Make sure the type is one of the supported sizes. */
7826 index
= tree_to_uhwi (TYPE_SIZE_UNIT (type
));
7827 index
= exact_log2 (index
);
7828 if (index
>= 0 && index
<= 4)
7830 unsigned int align
= TYPE_ALIGN_UNIT (type
);
7832 /* __sync builtins require strict data alignment. */
7833 if (exact_log2 (align
) >= index
)
7836 if (loaded_val
== stored_val
7837 && (GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_INT
7838 || GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
)
7839 && GET_MODE_BITSIZE (TYPE_MODE (type
)) <= BITS_PER_WORD
7840 && expand_omp_atomic_load (load_bb
, addr
, loaded_val
, index
))
7844 if ((GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_INT
7845 || GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
)
7846 && GET_MODE_BITSIZE (TYPE_MODE (type
)) <= BITS_PER_WORD
7847 && store_bb
== single_succ (load_bb
)
7848 && first_stmt (store_bb
) == store
7849 && expand_omp_atomic_store (load_bb
, addr
, loaded_val
,
7853 /* When possible, use specialized atomic update functions. */
7854 if ((INTEGRAL_TYPE_P (type
) || POINTER_TYPE_P (type
))
7855 && store_bb
== single_succ (load_bb
)
7856 && expand_omp_atomic_fetch_op (load_bb
, addr
,
7857 loaded_val
, stored_val
, index
))
/* If we don't have specialized __sync builtins, try to implement
   the operation as a compare-and-swap loop.  */
7862 if (expand_omp_atomic_pipeline (load_bb
, store_bb
, addr
,
7863 loaded_val
, stored_val
, index
))
7868 /* The ultimate fallback is wrapping the operation in a mutex. */
7869 expand_omp_atomic_mutex (load_bb
, store_bb
, addr
, loaded_val
, stored_val
);
/* Expand the OpenMP target{, data, update} directive starting at REGION.  */
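/* For example (illustration only): a construct like

	#pragma omp target map(tofrom: a)
	  a++;

   is outlined into a child function and launched through GOMP_target,
   while '#pragma omp target data' and '#pragma omp target update' map to
   GOMP_target_data and GOMP_target_update respectively, as selected from
   the directive kind further below.  a is a placeholder user variable.  */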
7876 expand_omp_target (struct omp_region
*region
)
7878 basic_block entry_bb
, exit_bb
, new_bb
;
7879 struct function
*child_cfun
= NULL
;
7880 tree child_fn
= NULL_TREE
, block
, t
;
7881 gimple_stmt_iterator gsi
;
7882 gimple entry_stmt
, stmt
;
7885 entry_stmt
= last_stmt (region
->entry
);
7886 new_bb
= region
->entry
;
7887 int kind
= gimple_omp_target_kind (entry_stmt
);
7888 if (kind
== GF_OMP_TARGET_KIND_REGION
)
7890 child_fn
= gimple_omp_target_child_fn (entry_stmt
);
7891 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
7894 entry_bb
= region
->entry
;
7895 exit_bb
= region
->exit
;
7897 if (kind
== GF_OMP_TARGET_KIND_REGION
)
7899 unsigned srcidx
, dstidx
, num
;
/* If the target region needs data sent from the parent
   function, then the very first statement (except possible
   tree profile counter updates) of the parallel body
   is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
   &.OMP_DATA_O is passed as an argument to the child function,
   we need to replace it with the argument as seen by the child
   function.

   In most cases, this will end up being the identity assignment
   .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
   a function call that has been inlined, the original PARM_DECL
   .OMP_DATA_I may have been converted into a different local
   variable.  In which case, we need to keep the assignment.  */
7914 if (gimple_omp_target_data_arg (entry_stmt
))
7916 basic_block entry_succ_bb
= single_succ (entry_bb
);
7917 gimple_stmt_iterator gsi
;
7919 gimple tgtcopy_stmt
= NULL
;
7921 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt
), 0);
7923 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
7925 gcc_assert (!gsi_end_p (gsi
));
7926 stmt
= gsi_stmt (gsi
);
7927 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
7930 if (gimple_num_ops (stmt
) == 2)
7932 tree arg
= gimple_assign_rhs1 (stmt
);
7934 /* We're ignoring the subcode because we're
7935 effectively doing a STRIP_NOPS. */
7937 if (TREE_CODE (arg
) == ADDR_EXPR
7938 && TREE_OPERAND (arg
, 0) == sender
)
7940 tgtcopy_stmt
= stmt
;
7946 gcc_assert (tgtcopy_stmt
!= NULL
);
7947 arg
= DECL_ARGUMENTS (child_fn
);
7949 gcc_assert (gimple_assign_lhs (tgtcopy_stmt
) == arg
);
7950 gsi_remove (&gsi
, true);
7953 /* Declare local variables needed in CHILD_CFUN. */
7954 block
= DECL_INITIAL (child_fn
);
7955 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
/* The gimplifier could record temporaries in target block
   rather than in containing function's local_decls chain,
   which would mean cgraph missed finalizing them.  Do it now.  */
7959 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
7960 if (TREE_CODE (t
) == VAR_DECL
7962 && !DECL_EXTERNAL (t
))
7963 varpool_finalize_decl (t
);
7964 DECL_SAVED_TREE (child_fn
) = NULL
;
7965 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7966 gimple_set_body (child_fn
, NULL
);
7967 TREE_USED (block
) = 1;
7969 /* Reset DECL_CONTEXT on function arguments. */
7970 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
7971 DECL_CONTEXT (t
) = child_fn
;
7973 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
7974 so that it can be moved to the child function. */
7975 gsi
= gsi_last_bb (entry_bb
);
7976 stmt
= gsi_stmt (gsi
);
7977 gcc_assert (stmt
&& gimple_code (stmt
) == GIMPLE_OMP_TARGET
7978 && gimple_omp_target_kind (stmt
)
7979 == GF_OMP_TARGET_KIND_REGION
);
7980 gsi_remove (&gsi
, true);
7981 e
= split_block (entry_bb
, stmt
);
7983 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
7985 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7988 gsi
= gsi_last_bb (exit_bb
);
7989 gcc_assert (!gsi_end_p (gsi
)
7990 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
7991 stmt
= gimple_build_return (NULL
);
7992 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
7993 gsi_remove (&gsi
, true);
7996 /* Move the target region into CHILD_CFUN. */
7998 block
= gimple_block (entry_stmt
);
8000 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
8002 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
/* When the OMP expansion process cannot guarantee an up-to-date
   loop tree, arrange for the child function to fix up loops.  */
8005 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP
))
8006 child_cfun
->x_current_loops
->state
|= LOOPS_NEED_FIXUP
;
8008 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8009 num
= vec_safe_length (child_cfun
->local_decls
);
8010 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
8012 t
= (*child_cfun
->local_decls
)[srcidx
];
8013 if (DECL_CONTEXT (t
) == cfun
->decl
)
8015 if (srcidx
!= dstidx
)
8016 (*child_cfun
->local_decls
)[dstidx
] = t
;
8020 vec_safe_truncate (child_cfun
->local_decls
, dstidx
);
8022 /* Inform the callgraph about the new function. */
8023 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
= cfun
->curr_properties
;
8024 cgraph_add_new_function (child_fn
, true);
/* Fix the callgraph edges for child_cfun.  Those for cfun will be
   fixed in a following pass.  */
8028 push_cfun (child_cfun
);
8029 rebuild_cgraph_edges ();
/* Some EH regions might become dead, see PR34608.  If
   pass_cleanup_cfg isn't the first pass to happen with the
   new child, these dead EH edges might cause problems.
   Clean them up now.  */
8035 if (flag_exceptions
)
8038 bool changed
= false;
8040 FOR_EACH_BB_FN (bb
, cfun
)
8041 changed
|= gimple_purge_dead_eh_edges (bb
);
8043 cleanup_tree_cfg ();
/* Emit a library call to launch the target region, or do data
   transfers.  */
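/* A rough sketch of the call emitted below for the target-region case
   (the data/update forms take the same arguments minus the function
   pointer):

	GOMP_target (device, child_fn, openmp_target,
		     mapnum, hostaddrs, sizes, kinds);

   where DEVICE defaults to -1, comes from a device clause, or is the
   value of (cond ? device : -2) when an if clause is present, and
   OPENMP_TARGET is for now a null placeholder for the future
   __OPENMP_TARGET__ symbol mentioned in the FIXME below.  */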
8050 tree t1
, t2
, t3
, t4
, device
, cond
, c
, clauses
;
8051 enum built_in_function start_ix
;
8052 location_t clause_loc
;
8054 clauses
= gimple_omp_target_clauses (entry_stmt
);
8056 if (kind
== GF_OMP_TARGET_KIND_REGION
)
8057 start_ix
= BUILT_IN_GOMP_TARGET
;
8058 else if (kind
== GF_OMP_TARGET_KIND_DATA
)
8059 start_ix
= BUILT_IN_GOMP_TARGET_DATA
;
8061 start_ix
= BUILT_IN_GOMP_TARGET_UPDATE
;
/* By default, the value of DEVICE is -1 (let runtime library choose)
   and there is no conditional.  */
8066 device
= build_int_cst (integer_type_node
, -1);
8068 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
8070 cond
= OMP_CLAUSE_IF_EXPR (c
);
8072 c
= find_omp_clause (clauses
, OMP_CLAUSE_DEVICE
);
8075 device
= OMP_CLAUSE_DEVICE_ID (c
);
8076 clause_loc
= OMP_CLAUSE_LOCATION (c
);
8079 clause_loc
= gimple_location (entry_stmt
);
8081 /* Ensure 'device' is of the correct type. */
8082 device
= fold_convert_loc (clause_loc
, integer_type_node
, device
);
/* If we found the clause 'if (cond)', build
   (cond ? device : -2).  */
8088 cond
= gimple_boolify (cond
);
8090 basic_block cond_bb
, then_bb
, else_bb
;
8094 tmp_var
= create_tmp_var (TREE_TYPE (device
), NULL
);
8095 if (kind
!= GF_OMP_TARGET_KIND_REGION
)
8097 gsi
= gsi_last_bb (new_bb
);
8099 e
= split_block (new_bb
, gsi_stmt (gsi
));
8102 e
= split_block (new_bb
, NULL
);
8107 then_bb
= create_empty_bb (cond_bb
);
8108 else_bb
= create_empty_bb (then_bb
);
8109 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
8110 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
8112 stmt
= gimple_build_cond_empty (cond
);
8113 gsi
= gsi_last_bb (cond_bb
);
8114 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
8116 gsi
= gsi_start_bb (then_bb
);
8117 stmt
= gimple_build_assign (tmp_var
, device
);
8118 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
8120 gsi
= gsi_start_bb (else_bb
);
8121 stmt
= gimple_build_assign (tmp_var
,
8122 build_int_cst (integer_type_node
, -2));
8123 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
8125 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
8126 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
8127 add_bb_to_loop (then_bb
, cond_bb
->loop_father
);
8128 add_bb_to_loop (else_bb
, cond_bb
->loop_father
);
8129 make_edge (then_bb
, new_bb
, EDGE_FALLTHRU
);
8130 make_edge (else_bb
, new_bb
, EDGE_FALLTHRU
);
8135 gsi
= gsi_last_bb (new_bb
);
8136 t
= gimple_omp_target_data_arg (entry_stmt
);
8139 t1
= size_zero_node
;
8140 t2
= build_zero_cst (ptr_type_node
);
8146 t1
= TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t
, 1))));
8147 t1
= size_binop (PLUS_EXPR
, t1
, size_int (1));
8148 t2
= build_fold_addr_expr (TREE_VEC_ELT (t
, 0));
8149 t3
= build_fold_addr_expr (TREE_VEC_ELT (t
, 1));
8150 t4
= build_fold_addr_expr (TREE_VEC_ELT (t
, 2));
/* FIXME: This will be address of
     extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
   symbol, as soon as the linker plugin is able to create it for us.  */
8157 tree openmp_target
= build_zero_cst (ptr_type_node
);
8158 if (kind
== GF_OMP_TARGET_KIND_REGION
)
8160 tree fnaddr
= build_fold_addr_expr (child_fn
);
8161 g
= gimple_build_call (builtin_decl_explicit (start_ix
), 7,
8162 device
, fnaddr
, openmp_target
, t1
, t2
, t3
, t4
);
8165 g
= gimple_build_call (builtin_decl_explicit (start_ix
), 6,
8166 device
, openmp_target
, t1
, t2
, t3
, t4
);
8167 gimple_set_location (g
, gimple_location (entry_stmt
));
8168 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
8169 if (kind
!= GF_OMP_TARGET_KIND_REGION
)
8172 gcc_assert (g
&& gimple_code (g
) == GIMPLE_OMP_TARGET
);
8173 gsi_remove (&gsi
, true);
8175 if (kind
== GF_OMP_TARGET_KIND_DATA
&& region
->exit
)
8177 gsi
= gsi_last_bb (region
->exit
);
8179 gcc_assert (g
&& gimple_code (g
) == GIMPLE_OMP_RETURN
);
8180 gsi_remove (&gsi
, true);
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */
8192 expand_omp (struct omp_region
*region
)
8196 location_t saved_location
;
8197 gimple inner_stmt
= NULL
;
/* First, determine whether this is a combined parallel+workshare
   region.  */
8201 if (region
->type
== GIMPLE_OMP_PARALLEL
)
8202 determine_parallel_type (region
);
8204 if (region
->type
== GIMPLE_OMP_FOR
8205 && gimple_omp_for_combined_p (last_stmt (region
->entry
)))
8206 inner_stmt
= last_stmt (region
->inner
->entry
);
8209 expand_omp (region
->inner
);
8211 saved_location
= input_location
;
8212 if (gimple_has_location (last_stmt (region
->entry
)))
8213 input_location
= gimple_location (last_stmt (region
->entry
));
8215 switch (region
->type
)
8217 case GIMPLE_OMP_PARALLEL
:
8218 case GIMPLE_OMP_TASK
:
8219 expand_omp_taskreg (region
);
8222 case GIMPLE_OMP_FOR
:
8223 expand_omp_for (region
, inner_stmt
);
8226 case GIMPLE_OMP_SECTIONS
:
8227 expand_omp_sections (region
);
8230 case GIMPLE_OMP_SECTION
:
/* Individual omp sections are handled together with their
   parent GIMPLE_OMP_SECTIONS region.  */
8235 case GIMPLE_OMP_SINGLE
:
8236 expand_omp_single (region
);
8239 case GIMPLE_OMP_MASTER
:
8240 case GIMPLE_OMP_TASKGROUP
:
8241 case GIMPLE_OMP_ORDERED
:
8242 case GIMPLE_OMP_CRITICAL
:
8243 case GIMPLE_OMP_TEAMS
:
8244 expand_omp_synch (region
);
8247 case GIMPLE_OMP_ATOMIC_LOAD
:
8248 expand_omp_atomic (region
);
8251 case GIMPLE_OMP_TARGET
:
8252 expand_omp_target (region
);
8259 input_location
= saved_location
;
8260 region
= region
->next
;
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, a whole
   forest of OMP constructs may be built).  */
8271 build_omp_regions_1 (basic_block bb
, struct omp_region
*parent
,
8274 gimple_stmt_iterator gsi
;
8278 gsi
= gsi_last_bb (bb
);
8279 if (!gsi_end_p (gsi
) && is_gimple_omp (gsi_stmt (gsi
)))
8281 struct omp_region
*region
;
8282 enum gimple_code code
;
8284 stmt
= gsi_stmt (gsi
);
8285 code
= gimple_code (stmt
);
8286 if (code
== GIMPLE_OMP_RETURN
)
/* STMT is the return point out of region PARENT.  Mark it
   as the exit point and make PARENT the immediately
   enclosing region.  */
8291 gcc_assert (parent
);
8294 parent
= parent
->outer
;
8296 else if (code
== GIMPLE_OMP_ATOMIC_STORE
)
/* GIMPLE_OMP_ATOMIC_STORE is analogous to
   GIMPLE_OMP_RETURN, but matches with
   GIMPLE_OMP_ATOMIC_LOAD.  */
8301 gcc_assert (parent
);
8302 gcc_assert (parent
->type
== GIMPLE_OMP_ATOMIC_LOAD
);
8305 parent
= parent
->outer
;
8308 else if (code
== GIMPLE_OMP_CONTINUE
)
8310 gcc_assert (parent
);
8313 else if (code
== GIMPLE_OMP_SECTIONS_SWITCH
)
/* GIMPLE_OMP_SECTIONS_SWITCH is part of
   GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
8319 else if (code
== GIMPLE_OMP_TARGET
8320 && gimple_omp_target_kind (stmt
) == GF_OMP_TARGET_KIND_UPDATE
)
8321 new_omp_region (bb
, code
, parent
);
/* Otherwise, this directive becomes the parent for a new
   region.  */
8326 region
= new_omp_region (bb
, code
, parent
);
8331 if (single_tree
&& !parent
)
8334 for (son
= first_dom_son (CDI_DOMINATORS
, bb
);
8336 son
= next_dom_son (CDI_DOMINATORS
, son
))
8337 build_omp_regions_1 (son
, parent
, single_tree
);
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */
8344 build_omp_regions_root (basic_block root
)
8346 gcc_assert (root_omp_region
== NULL
);
8347 build_omp_regions_1 (root
, NULL
, true);
8348 gcc_assert (root_omp_region
!= NULL
);
8351 /* Expands omp construct (and its subconstructs) starting in HEAD. */
8354 omp_expand_local (basic_block head
)
8356 build_omp_regions_root (head
);
8357 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8359 fprintf (dump_file
, "\nOMP region tree\n\n");
8360 dump_omp_region (dump_file
, root_omp_region
, 0);
8361 fprintf (dump_file
, "\n");
8364 remove_exit_barriers (root_omp_region
);
8365 expand_omp (root_omp_region
);
8367 free_omp_regions ();
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */
8374 build_omp_regions (void)
8376 gcc_assert (root_omp_region
== NULL
);
8377 calculate_dominance_info (CDI_DOMINATORS
);
8378 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun
), NULL
, false);
8381 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8384 execute_expand_omp (void)
8386 build_omp_regions ();
8388 if (!root_omp_region
)
8393 fprintf (dump_file
, "\nOMP region tree\n\n");
8394 dump_omp_region (dump_file
, root_omp_region
, 0);
8395 fprintf (dump_file
, "\n");
8398 remove_exit_barriers (root_omp_region
);
8400 expand_omp (root_omp_region
);
8402 cleanup_tree_cfg ();
8404 free_omp_regions ();
8409 /* OMP expansion -- the default pass, run before creation of SSA form. */
8413 const pass_data pass_data_expand_omp
=
8415 GIMPLE_PASS
, /* type */
8416 "ompexp", /* name */
8417 OPTGROUP_NONE
, /* optinfo_flags */
8418 TV_NONE
, /* tv_id */
8419 PROP_gimple_any
, /* properties_required */
8420 0, /* properties_provided */
8421 0, /* properties_destroyed */
8422 0, /* todo_flags_start */
8423 0, /* todo_flags_finish */
8426 class pass_expand_omp
: public gimple_opt_pass
8429 pass_expand_omp (gcc::context
*ctxt
)
8430 : gimple_opt_pass (pass_data_expand_omp
, ctxt
)
8433 /* opt_pass methods: */
8434 virtual bool gate (function
*)
8436 return ((flag_openmp
!= 0 || flag_openmp_simd
!= 0
8437 || flag_cilkplus
!= 0) && !seen_error ());
8440 virtual unsigned int execute (function
*) { return execute_expand_omp (); }
8442 }; // class pass_expand_omp
8447 make_pass_expand_omp (gcc::context
*ctxt
)
8449 return new pass_expand_omp (ctxt
);
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* If ctx is a worksharing context inside of a cancellable parallel
   region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
   and conditional branch to parallel's cancel_label to handle
   cancellation in the implicit barrier.  */
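/* A rough sketch of the result (label and temporary names are
   illustrative only): the trailing GIMPLE_OMP_RETURN is given a boolean
   LHS that the cancellable barrier will set, and this is appended after
   it:

     if (.cancel_tmp != false) goto <cancel_label>; else goto <fallthru>;
   <fallthru>:  */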
8460 maybe_add_implicit_barrier_cancel (omp_context
*ctx
, gimple_seq
*body
)
8462 gimple omp_return
= gimple_seq_last_stmt (*body
);
8463 gcc_assert (gimple_code (omp_return
) == GIMPLE_OMP_RETURN
);
8464 if (gimple_omp_return_nowait_p (omp_return
))
8467 && gimple_code (ctx
->outer
->stmt
) == GIMPLE_OMP_PARALLEL
8468 && ctx
->outer
->cancellable
)
8470 tree fndecl
= builtin_decl_explicit (BUILT_IN_GOMP_CANCEL
);
8471 tree c_bool_type
= TREE_TYPE (TREE_TYPE (fndecl
));
8472 tree lhs
= create_tmp_var (c_bool_type
, NULL
);
8473 gimple_omp_return_set_lhs (omp_return
, lhs
);
8474 tree fallthru_label
= create_artificial_label (UNKNOWN_LOCATION
);
8475 gimple g
= gimple_build_cond (NE_EXPR
, lhs
,
8476 fold_convert (c_bool_type
,
8477 boolean_false_node
),
8478 ctx
->outer
->cancel_label
, fallthru_label
);
8479 gimple_seq_add_stmt (body
, g
);
8480 gimple_seq_add_stmt (body
, gimple_build_label (fallthru_label
));
/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */
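/* A rough sketch of the sequence assembled below:

     <ilist: privatization/firstprivate setup>
     GIMPLE_OMP_SECTIONS <clauses, control var .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     <bind holding each lowered GIMPLE_OMP_SECTION body>
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  [cancel label]  <dlist: destructors>
     GIMPLE_OMP_RETURN [nowait]  */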
8488 lower_omp_sections (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
8490 tree block
, control
;
8491 gimple_stmt_iterator tgsi
;
8492 gimple stmt
, new_stmt
, bind
, t
;
8493 gimple_seq ilist
, dlist
, olist
, new_body
;
8495 stmt
= gsi_stmt (*gsi_p
);
8497 push_gimplify_context ();
8501 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt
),
8502 &ilist
, &dlist
, ctx
, NULL
);
8504 new_body
= gimple_omp_body (stmt
);
8505 gimple_omp_set_body (stmt
, NULL
);
8506 tgsi
= gsi_start (new_body
);
8507 for (; !gsi_end_p (tgsi
); gsi_next (&tgsi
))
8512 sec_start
= gsi_stmt (tgsi
);
8513 sctx
= maybe_lookup_ctx (sec_start
);
8516 lower_omp (gimple_omp_body_ptr (sec_start
), sctx
);
8517 gsi_insert_seq_after (&tgsi
, gimple_omp_body (sec_start
),
8518 GSI_CONTINUE_LINKING
);
8519 gimple_omp_set_body (sec_start
, NULL
);
8521 if (gsi_one_before_end_p (tgsi
))
8523 gimple_seq l
= NULL
;
8524 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt
), NULL
,
8526 gsi_insert_seq_after (&tgsi
, l
, GSI_CONTINUE_LINKING
);
8527 gimple_omp_section_set_last (sec_start
);
8530 gsi_insert_after (&tgsi
, gimple_build_omp_return (false),
8531 GSI_CONTINUE_LINKING
);
8534 block
= make_node (BLOCK
);
8535 bind
= gimple_build_bind (NULL
, new_body
, block
);
8538 lower_reduction_clauses (gimple_omp_sections_clauses (stmt
), &olist
, ctx
);
8540 block
= make_node (BLOCK
);
8541 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
8542 gsi_replace (gsi_p
, new_stmt
, true);
8544 pop_gimplify_context (new_stmt
);
8545 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
8546 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
8547 if (BLOCK_VARS (block
))
8548 TREE_USED (block
) = 1;
8551 gimple_seq_add_seq (&new_body
, ilist
);
8552 gimple_seq_add_stmt (&new_body
, stmt
);
8553 gimple_seq_add_stmt (&new_body
, gimple_build_omp_sections_switch ());
8554 gimple_seq_add_stmt (&new_body
, bind
);
8556 control
= create_tmp_var (unsigned_type_node
, ".section");
8557 t
= gimple_build_omp_continue (control
, control
);
8558 gimple_omp_sections_set_control (stmt
, control
);
8559 gimple_seq_add_stmt (&new_body
, t
);
8561 gimple_seq_add_seq (&new_body
, olist
);
8562 if (ctx
->cancellable
)
8563 gimple_seq_add_stmt (&new_body
, gimple_build_label (ctx
->cancel_label
));
8564 gimple_seq_add_seq (&new_body
, dlist
);
8566 new_body
= maybe_catch_exception (new_body
);
8568 t
= gimple_build_omp_return
8569 (!!find_omp_clause (gimple_omp_sections_clauses (stmt
),
8570 OMP_CLAUSE_NOWAIT
));
8571 gimple_seq_add_stmt (&new_body
, t
);
8572 maybe_add_implicit_barrier_cancel (ctx
, &new_body
);
8574 gimple_bind_set_body (new_stmt
, new_body
);
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ] -> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   for a synchronization analysis pass.  */
8590 lower_omp_single_simple (gimple single_stmt
, gimple_seq
*pre_p
)
8592 location_t loc
= gimple_location (single_stmt
);
8593 tree tlabel
= create_artificial_label (loc
);
8594 tree flabel
= create_artificial_label (loc
);
8598 decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START
);
8599 lhs
= create_tmp_var (TREE_TYPE (TREE_TYPE (decl
)), NULL
);
8600 call
= gimple_build_call (decl
, 0);
8601 gimple_call_set_lhs (call
, lhs
);
8602 gimple_seq_add_stmt (pre_p
, call
);
8604 cond
= gimple_build_cond (EQ_EXPR
, lhs
,
8605 fold_convert_loc (loc
, TREE_TYPE (lhs
),
8608 gimple_seq_add_stmt (pre_p
, cond
);
8609 gimple_seq_add_stmt (pre_p
, gimple_build_label (tlabel
));
8610 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
8611 gimple_seq_add_stmt (pre_p
, gimple_build_label (flabel
));
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a; copyout.b = b; copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  { a = copyout_p->a; b = copyout_p->b; c = copyout_p->c; }
	GOMP_barrier ();

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   for a synchronization analysis pass.  */
8645 lower_omp_single_copy (gimple single_stmt
, gimple_seq
*pre_p
, omp_context
*ctx
)
8647 tree ptr_type
, t
, l0
, l1
, l2
, bfn_decl
;
8648 gimple_seq copyin_seq
;
8649 location_t loc
= gimple_location (single_stmt
);
8651 ctx
->sender_decl
= create_tmp_var (ctx
->record_type
, ".omp_copy_o");
8653 ptr_type
= build_pointer_type (ctx
->record_type
);
8654 ctx
->receiver_decl
= create_tmp_var (ptr_type
, ".omp_copy_i");
8656 l0
= create_artificial_label (loc
);
8657 l1
= create_artificial_label (loc
);
8658 l2
= create_artificial_label (loc
);
8660 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START
);
8661 t
= build_call_expr_loc (loc
, bfn_decl
, 0);
8662 t
= fold_convert_loc (loc
, ptr_type
, t
);
8663 gimplify_assign (ctx
->receiver_decl
, t
, pre_p
);
8665 t
= build2 (EQ_EXPR
, boolean_type_node
, ctx
->receiver_decl
,
8666 build_int_cst (ptr_type
, 0));
8667 t
= build3 (COND_EXPR
, void_type_node
, t
,
8668 build_and_jump (&l0
), build_and_jump (&l1
));
8669 gimplify_and_add (t
, pre_p
);
8671 gimple_seq_add_stmt (pre_p
, gimple_build_label (l0
));
8673 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
8676 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt
), pre_p
,
8679 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
8680 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END
);
8681 t
= build_call_expr_loc (loc
, bfn_decl
, 1, t
);
8682 gimplify_and_add (t
, pre_p
);
8684 t
= build_and_jump (&l2
);
8685 gimplify_and_add (t
, pre_p
);
8687 gimple_seq_add_stmt (pre_p
, gimple_build_label (l1
));
8689 gimple_seq_add_seq (pre_p
, copyin_seq
);
8691 gimple_seq_add_stmt (pre_p
, gimple_build_label (l2
));
8695 /* Expand code for an OpenMP single directive. */
8698 lower_omp_single (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
8701 gimple t
, bind
, single_stmt
= gsi_stmt (*gsi_p
);
8702 gimple_seq bind_body
, bind_body_tail
= NULL
, dlist
;
8704 push_gimplify_context ();
8706 block
= make_node (BLOCK
);
8707 bind
= gimple_build_bind (NULL
, NULL
, block
);
8708 gsi_replace (gsi_p
, bind
, true);
8711 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt
),
8712 &bind_body
, &dlist
, ctx
, NULL
);
8713 lower_omp (gimple_omp_body_ptr (single_stmt
), ctx
);
8715 gimple_seq_add_stmt (&bind_body
, single_stmt
);
8717 if (ctx
->record_type
)
8718 lower_omp_single_copy (single_stmt
, &bind_body
, ctx
);
8720 lower_omp_single_simple (single_stmt
, &bind_body
);
8722 gimple_omp_set_body (single_stmt
, NULL
);
8724 gimple_seq_add_seq (&bind_body
, dlist
);
8726 bind_body
= maybe_catch_exception (bind_body
);
8728 t
= gimple_build_omp_return
8729 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt
),
8730 OMP_CLAUSE_NOWAIT
));
8731 gimple_seq_add_stmt (&bind_body_tail
, t
);
8732 maybe_add_implicit_barrier_cancel (ctx
, &bind_body_tail
);
8733 if (ctx
->record_type
)
8735 gimple_stmt_iterator gsi
= gsi_start (bind_body_tail
);
8736 tree clobber
= build_constructor (ctx
->record_type
, NULL
);
8737 TREE_THIS_VOLATILE (clobber
) = 1;
8738 gsi_insert_after (&gsi
, gimple_build_assign (ctx
->sender_decl
,
8739 clobber
), GSI_SAME_STMT
);
8741 gimple_seq_add_seq (&bind_body
, bind_body_tail
);
8742 gimple_bind_set_body (bind
, bind_body
);
8744 pop_gimplify_context (bind
);
8746 gimple_bind_append_vars (bind
, ctx
->block_vars
);
8747 BLOCK_VARS (block
) = ctx
->block_vars
;
8748 if (BLOCK_VARS (block
))
8749 TREE_USED (block
) = 1;
/* Expand code for an OpenMP master directive.  */
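/* A rough sketch of the lowering below (LAB is an artificial label):

     if (omp_get_thread_num () != 0) goto LAB;
     <master body>
   LAB:
     GIMPLE_OMP_RETURN  */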
8756 lower_omp_master (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
8758 tree block
, lab
= NULL
, x
, bfn_decl
;
8759 gimple stmt
= gsi_stmt (*gsi_p
), bind
;
8760 location_t loc
= gimple_location (stmt
);
8763 push_gimplify_context ();
8765 block
= make_node (BLOCK
);
8766 bind
= gimple_build_bind (NULL
, NULL
, block
);
8767 gsi_replace (gsi_p
, bind
, true);
8768 gimple_bind_add_stmt (bind
, stmt
);
8770 bfn_decl
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
8771 x
= build_call_expr_loc (loc
, bfn_decl
, 0);
8772 x
= build2 (EQ_EXPR
, boolean_type_node
, x
, integer_zero_node
);
8773 x
= build3 (COND_EXPR
, void_type_node
, x
, NULL
, build_and_jump (&lab
));
8775 gimplify_and_add (x
, &tseq
);
8776 gimple_bind_add_seq (bind
, tseq
);
8778 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
8779 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
8780 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
8781 gimple_omp_set_body (stmt
, NULL
);
8783 gimple_bind_add_stmt (bind
, gimple_build_label (lab
));
8785 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
8787 pop_gimplify_context (bind
);
8789 gimple_bind_append_vars (bind
, ctx
->block_vars
);
8790 BLOCK_VARS (block
) = ctx
->block_vars
;
/* Expand code for an OpenMP taskgroup directive.  */
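/* A rough sketch of the bind built below:

     GOMP_taskgroup_start ();
     <lowered taskgroup body>
     GIMPLE_OMP_RETURN  */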
8797 lower_omp_taskgroup (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
8799 gimple stmt
= gsi_stmt (*gsi_p
), bind
, x
;
8800 tree block
= make_node (BLOCK
);
8802 bind
= gimple_build_bind (NULL
, NULL
, block
);
8803 gsi_replace (gsi_p
, bind
, true);
8804 gimple_bind_add_stmt (bind
, stmt
);
8806 x
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START
),
8808 gimple_bind_add_stmt (bind
, x
);
8810 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
8811 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
8812 gimple_omp_set_body (stmt
, NULL
);
8814 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
8816 gimple_bind_append_vars (bind
, ctx
->block_vars
);
8817 BLOCK_VARS (block
) = ctx
->block_vars
;
/* Expand code for an OpenMP ordered directive.  */
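/* A rough sketch of the bind built below:

     GOMP_ordered_start ();
     <lowered ordered body>
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN  */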
8824 lower_omp_ordered (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
8827 gimple stmt
= gsi_stmt (*gsi_p
), bind
, x
;
8829 push_gimplify_context ();
8831 block
= make_node (BLOCK
);
8832 bind
= gimple_build_bind (NULL
, NULL
, block
);
8833 gsi_replace (gsi_p
, bind
, true);
8834 gimple_bind_add_stmt (bind
, stmt
);
8836 x
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START
),
8838 gimple_bind_add_stmt (bind
, x
);
8840 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
8841 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
8842 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
8843 gimple_omp_set_body (stmt
, NULL
);
8845 x
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END
), 0);
8846 gimple_bind_add_stmt (bind
, x
);
8848 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
8850 pop_gimplify_context (bind
);
8852 gimple_bind_append_vars (bind
, ctx
->block_vars
);
8853 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case it
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
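/* A rough sketch of the lowering below.  For an unnamed critical:

     GOMP_critical_start ();  <body>  GOMP_critical_end ();

   and for a named one, e.g. "#pragma omp critical (foo)" (the name is
   illustrative), a common mutex symbol is created and passed by address:

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     <body>
     GOMP_critical_name_end (&.gomp_critical_user_foo);  */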
8862 static GTY((param1_is (tree
), param2_is (tree
)))
8863 splay_tree critical_name_mutexes
;
8866 lower_omp_critical (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
8869 tree name
, lock
, unlock
;
8870 gimple stmt
= gsi_stmt (*gsi_p
), bind
;
8871 location_t loc
= gimple_location (stmt
);
8874 name
= gimple_omp_critical_name (stmt
);
8880 if (!critical_name_mutexes
)
8881 critical_name_mutexes
8882 = splay_tree_new_ggc (splay_tree_compare_pointers
,
8883 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s
,
8884 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s
);
8886 n
= splay_tree_lookup (critical_name_mutexes
, (splay_tree_key
) name
);
8891 decl
= create_tmp_var_raw (ptr_type_node
, NULL
);
8893 new_str
= ACONCAT ((".gomp_critical_user_",
8894 IDENTIFIER_POINTER (name
), NULL
));
8895 DECL_NAME (decl
) = get_identifier (new_str
);
8896 TREE_PUBLIC (decl
) = 1;
8897 TREE_STATIC (decl
) = 1;
8898 DECL_COMMON (decl
) = 1;
8899 DECL_ARTIFICIAL (decl
) = 1;
8900 DECL_IGNORED_P (decl
) = 1;
8901 varpool_finalize_decl (decl
);
8903 splay_tree_insert (critical_name_mutexes
, (splay_tree_key
) name
,
8904 (splay_tree_value
) decl
);
8907 decl
= (tree
) n
->value
;
8909 lock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START
);
8910 lock
= build_call_expr_loc (loc
, lock
, 1, build_fold_addr_expr_loc (loc
, decl
));
8912 unlock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END
);
8913 unlock
= build_call_expr_loc (loc
, unlock
, 1,
8914 build_fold_addr_expr_loc (loc
, decl
));
8918 lock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START
);
8919 lock
= build_call_expr_loc (loc
, lock
, 0);
8921 unlock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END
);
8922 unlock
= build_call_expr_loc (loc
, unlock
, 0);
8925 push_gimplify_context ();
8927 block
= make_node (BLOCK
);
8928 bind
= gimple_build_bind (NULL
, NULL
, block
);
8929 gsi_replace (gsi_p
, bind
, true);
8930 gimple_bind_add_stmt (bind
, stmt
);
8932 tbody
= gimple_bind_body (bind
);
8933 gimplify_and_add (lock
, &tbody
);
8934 gimple_bind_set_body (bind
, tbody
);
8936 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
8937 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
8938 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
8939 gimple_omp_set_body (stmt
, NULL
);
8941 tbody
= gimple_bind_body (bind
);
8942 gimplify_and_add (unlock
, &tbody
);
8943 gimple_bind_set_body (bind
, tbody
);
8945 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
8947 pop_gimplify_context (bind
);
8948 gimple_bind_append_vars (bind
, ctx
->block_vars
);
8949 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to *BODY_P.  */
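/* For example, for a loop "for (V = N1; V < N2; V += STEP)" the
   lastprivate copy-back appended to *DLIST is guarded roughly as

     if (V >= N2)        (or "V == N2" when STEP is a literal +/-1)
       <copy the private copies back to the original variables>  */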
8960 lower_omp_for_lastprivate (struct omp_for_data
*fd
, gimple_seq
*body_p
,
8961 gimple_seq
*dlist
, struct omp_context
*ctx
)
8963 tree clauses
, cond
, vinit
;
8964 enum tree_code cond_code
;
8967 cond_code
= fd
->loop
.cond_code
;
8968 cond_code
= cond_code
== LT_EXPR
? GE_EXPR
: LE_EXPR
;
/* When possible, use a strict equality expression.  This can let VRP
   type optimizations deduce the value and remove a copy.  */
8972 if (tree_fits_shwi_p (fd
->loop
.step
))
8974 HOST_WIDE_INT step
= tree_to_shwi (fd
->loop
.step
);
8975 if (step
== 1 || step
== -1)
8976 cond_code
= EQ_EXPR
;
8979 cond
= build2 (cond_code
, boolean_type_node
, fd
->loop
.v
, fd
->loop
.n2
);
8981 clauses
= gimple_omp_for_clauses (fd
->for_stmt
);
8983 lower_lastprivate_clauses (clauses
, cond
, &stmts
, ctx
);
8984 if (!gimple_seq_empty_p (stmts
))
8986 gimple_seq_add_seq (&stmts
, *dlist
);
8989 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
8990 vinit
= fd
->loop
.n1
;
8991 if (cond_code
== EQ_EXPR
8992 && tree_fits_shwi_p (fd
->loop
.n2
)
8993 && ! integer_zerop (fd
->loop
.n2
))
8994 vinit
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 0);
8996 vinit
= unshare_expr (vinit
);
/* Initialize the iterator variable, so that threads that don't execute
   any iterations don't execute the lastprivate clauses by accident.  */
9000 gimplify_assign (fd
->loop
.v
, vinit
, body_p
);
9005 /* Lower code for an OpenMP loop directive. */
9008 lower_omp_for (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
9011 struct omp_for_data fd
, *fdp
= NULL
;
9012 gimple stmt
= gsi_stmt (*gsi_p
), new_stmt
;
9013 gimple_seq omp_for_body
, body
, dlist
;
9016 push_gimplify_context ();
9018 lower_omp (gimple_omp_for_pre_body_ptr (stmt
), ctx
);
9020 block
= make_node (BLOCK
);
9021 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
/* Replace at gsi right away, so that 'stmt' is no member
   of a sequence anymore as we're going to add to a different
   one below.  */
9025 gsi_replace (gsi_p
, new_stmt
, true);
/* Move declaration of temporaries in the loop body before we make
   it explicit below.  */
9029 omp_for_body
= gimple_omp_body (stmt
);
9030 if (!gimple_seq_empty_p (omp_for_body
)
9031 && gimple_code (gimple_seq_first_stmt (omp_for_body
)) == GIMPLE_BIND
)
9033 gimple inner_bind
= gimple_seq_first_stmt (omp_for_body
);
9034 tree vars
= gimple_bind_vars (inner_bind
);
9035 gimple_bind_append_vars (new_stmt
, vars
);
/* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
   keep them on the inner_bind and its block.  */
9038 gimple_bind_set_vars (inner_bind
, NULL_TREE
);
9039 if (gimple_bind_block (inner_bind
))
9040 BLOCK_VARS (gimple_bind_block (inner_bind
)) = NULL_TREE
;
9043 if (gimple_omp_for_combined_into_p (stmt
))
9045 extract_omp_for_data (stmt
, &fd
, NULL
);
/* We need two temporaries with fd.loop.v type (istart/iend)
   and then (fd.collapse - 1) temporaries with the same
   type for count2 ... countN-1 vars if not constant.  */
9052 tree type
= fd
.iter_type
;
9054 && TREE_CODE (fd
.loop
.n2
) != INTEGER_CST
)
9055 count
+= fd
.collapse
- 1;
9056 bool parallel_for
= gimple_omp_for_kind (stmt
) == GF_OMP_FOR_KIND_FOR
;
9057 tree outerc
= NULL
, *pc
= gimple_omp_for_clauses_ptr (stmt
);
9061 = find_omp_clause (gimple_omp_parallel_clauses (ctx
->outer
->stmt
),
9062 OMP_CLAUSE__LOOPTEMP_
);
9063 for (i
= 0; i
< count
; i
++)
9068 gcc_assert (outerc
);
9069 temp
= lookup_decl (OMP_CLAUSE_DECL (outerc
), ctx
->outer
);
9070 outerc
= find_omp_clause (OMP_CLAUSE_CHAIN (outerc
),
9071 OMP_CLAUSE__LOOPTEMP_
);
9075 temp
= create_tmp_var (type
, NULL
);
9076 insert_decl_map (&ctx
->outer
->cb
, temp
, temp
);
9078 *pc
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE__LOOPTEMP_
);
9079 OMP_CLAUSE_DECL (*pc
) = temp
;
9080 pc
= &OMP_CLAUSE_CHAIN (*pc
);
9085 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9088 lower_rec_input_clauses (gimple_omp_for_clauses (stmt
), &body
, &dlist
, ctx
,
9090 gimple_seq_add_seq (&body
, gimple_omp_for_pre_body (stmt
));
9092 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
/* Lower the header expressions.  At this point, we can assume that
   the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

   We just need to make sure that VAL1, VAL2 and VAL3 are lowered
   using the .omp_data_s mapping, if needed.  */
9101 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
9103 rhs_p
= gimple_omp_for_initial_ptr (stmt
, i
);
9104 if (!is_gimple_min_invariant (*rhs_p
))
9105 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
9107 rhs_p
= gimple_omp_for_final_ptr (stmt
, i
);
9108 if (!is_gimple_min_invariant (*rhs_p
))
9109 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
9111 rhs_p
= &TREE_OPERAND (gimple_omp_for_incr (stmt
, i
), 1);
9112 if (!is_gimple_min_invariant (*rhs_p
))
9113 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
9116 /* Once lowered, extract the bounds and clauses. */
9117 extract_omp_for_data (stmt
, &fd
, NULL
);
9119 lower_omp_for_lastprivate (&fd
, &body
, &dlist
, ctx
);
9121 gimple_seq_add_stmt (&body
, stmt
);
9122 gimple_seq_add_seq (&body
, gimple_omp_body (stmt
));
9124 gimple_seq_add_stmt (&body
, gimple_build_omp_continue (fd
.loop
.v
,
9127 /* After the loop, add exit clauses. */
9128 lower_reduction_clauses (gimple_omp_for_clauses (stmt
), &body
, ctx
);
9130 if (ctx
->cancellable
)
9131 gimple_seq_add_stmt (&body
, gimple_build_label (ctx
->cancel_label
));
9133 gimple_seq_add_seq (&body
, dlist
);
9135 body
= maybe_catch_exception (body
);
9137 /* Region exit marker goes at the end of the loop body. */
9138 gimple_seq_add_stmt (&body
, gimple_build_omp_return (fd
.have_nowait
));
9139 maybe_add_implicit_barrier_cancel (ctx
, &body
);
9140 pop_gimplify_context (new_stmt
);
9142 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
9143 BLOCK_VARS (block
) = gimple_bind_vars (new_stmt
);
9144 if (BLOCK_VARS (block
))
9145 TREE_USED (block
) = 1;
9147 gimple_bind_set_body (new_stmt
, body
);
9148 gimple_omp_set_body (stmt
, NULL
);
9149 gimple_omp_for_set_pre_body (stmt
, NULL
);
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
9156 check_combined_parallel (gimple_stmt_iterator
*gsi_p
,
9157 bool *handled_ops_p
,
9158 struct walk_stmt_info
*wi
)
9160 int *info
= (int *) wi
->info
;
9161 gimple stmt
= gsi_stmt (*gsi_p
);
9163 *handled_ops_p
= true;
9164 switch (gimple_code (stmt
))
9168 case GIMPLE_OMP_FOR
:
9169 case GIMPLE_OMP_SECTIONS
:
9170 *info
= *info
== 0 ? 1 : -1;
9179 struct omp_taskcopy_context
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
9190 task_copyfn_copy_decl (tree var
, copy_body_data
*cb
)
9192 struct omp_taskcopy_context
*tcctx
= (struct omp_taskcopy_context
*) cb
;
9194 if (splay_tree_lookup (tcctx
->ctx
->sfield_map
, (splay_tree_key
) var
))
9195 return create_tmp_var (TREE_TYPE (var
), NULL
);
9201 task_copyfn_remap_type (struct omp_taskcopy_context
*tcctx
, tree orig_type
)
9203 tree name
, new_fields
= NULL
, type
, f
;
9205 type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
9206 name
= DECL_NAME (TYPE_NAME (orig_type
));
9207 name
= build_decl (gimple_location (tcctx
->ctx
->stmt
),
9208 TYPE_DECL
, name
, type
);
9209 TYPE_NAME (type
) = name
;
9211 for (f
= TYPE_FIELDS (orig_type
); f
; f
= TREE_CHAIN (f
))
9213 tree new_f
= copy_node (f
);
9214 DECL_CONTEXT (new_f
) = type
;
9215 TREE_TYPE (new_f
) = remap_type (TREE_TYPE (f
), &tcctx
->cb
);
9216 TREE_CHAIN (new_f
) = new_fields
;
9217 walk_tree (&DECL_SIZE (new_f
), copy_tree_body_r
, &tcctx
->cb
, NULL
);
9218 walk_tree (&DECL_SIZE_UNIT (new_f
), copy_tree_body_r
, &tcctx
->cb
, NULL
);
9219 walk_tree (&DECL_FIELD_OFFSET (new_f
), copy_tree_body_r
,
9222 *pointer_map_insert (tcctx
->cb
.decl_map
, f
) = new_f
;
9224 TYPE_FIELDS (type
) = nreverse (new_fields
);
9229 /* Create task copyfn. */
9232 create_task_copyfn (gimple task_stmt
, omp_context
*ctx
)
9234 struct function
*child_cfun
;
9235 tree child_fn
, t
, c
, src
, dst
, f
, sf
, arg
, sarg
, decl
;
9236 tree record_type
, srecord_type
, bind
, list
;
9237 bool record_needs_remap
= false, srecord_needs_remap
= false;
9239 struct omp_taskcopy_context tcctx
;
9240 location_t loc
= gimple_location (task_stmt
);
9242 child_fn
= gimple_omp_task_copy_fn (task_stmt
);
9243 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
9244 gcc_assert (child_cfun
->cfg
== NULL
);
9245 DECL_SAVED_TREE (child_fn
) = alloc_stmt_list ();
9247 /* Reset DECL_CONTEXT on function arguments. */
9248 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
9249 DECL_CONTEXT (t
) = child_fn
;
9251 /* Populate the function. */
9252 push_gimplify_context ();
9253 push_cfun (child_cfun
);
9255 bind
= build3 (BIND_EXPR
, void_type_node
, NULL
, NULL
, NULL
);
9256 TREE_SIDE_EFFECTS (bind
) = 1;
9258 DECL_SAVED_TREE (child_fn
) = bind
;
9259 DECL_SOURCE_LOCATION (child_fn
) = gimple_location (task_stmt
);
9261 /* Remap src and dst argument types if needed. */
9262 record_type
= ctx
->record_type
;
9263 srecord_type
= ctx
->srecord_type
;
9264 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
9265 if (variably_modified_type_p (TREE_TYPE (f
), ctx
->cb
.src_fn
))
9267 record_needs_remap
= true;
9270 for (f
= TYPE_FIELDS (srecord_type
); f
; f
= DECL_CHAIN (f
))
9271 if (variably_modified_type_p (TREE_TYPE (f
), ctx
->cb
.src_fn
))
9273 srecord_needs_remap
= true;
9277 if (record_needs_remap
|| srecord_needs_remap
)
9279 memset (&tcctx
, '\0', sizeof (tcctx
));
9280 tcctx
.cb
.src_fn
= ctx
->cb
.src_fn
;
9281 tcctx
.cb
.dst_fn
= child_fn
;
9282 tcctx
.cb
.src_node
= cgraph_get_node (tcctx
.cb
.src_fn
);
9283 gcc_checking_assert (tcctx
.cb
.src_node
);
9284 tcctx
.cb
.dst_node
= tcctx
.cb
.src_node
;
9285 tcctx
.cb
.src_cfun
= ctx
->cb
.src_cfun
;
9286 tcctx
.cb
.copy_decl
= task_copyfn_copy_decl
;
9287 tcctx
.cb
.eh_lp_nr
= 0;
9288 tcctx
.cb
.transform_call_graph_edges
= CB_CGE_MOVE
;
9289 tcctx
.cb
.decl_map
= pointer_map_create ();
9292 if (record_needs_remap
)
9293 record_type
= task_copyfn_remap_type (&tcctx
, record_type
);
9294 if (srecord_needs_remap
)
9295 srecord_type
= task_copyfn_remap_type (&tcctx
, srecord_type
);
9298 tcctx
.cb
.decl_map
= NULL
;
9300 arg
= DECL_ARGUMENTS (child_fn
);
9301 TREE_TYPE (arg
) = build_pointer_type (record_type
);
9302 sarg
= DECL_CHAIN (arg
);
9303 TREE_TYPE (sarg
) = build_pointer_type (srecord_type
);
/* First pass: initialize temporaries used in record_type and srecord_type
   sizes and field offsets.  */
9307 if (tcctx
.cb
.decl_map
)
9308 for (c
= gimple_omp_task_clauses (task_stmt
); c
; c
= OMP_CLAUSE_CHAIN (c
))
9309 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_FIRSTPRIVATE
)
9313 decl
= OMP_CLAUSE_DECL (c
);
9314 p
= (tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, decl
);
9317 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
9318 sf
= (tree
) n
->value
;
9319 sf
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, sf
);
9320 src
= build_simple_mem_ref_loc (loc
, sarg
);
9321 src
= omp_build_component_ref (src
, sf
);
9322 t
= build2 (MODIFY_EXPR
, TREE_TYPE (*p
), *p
, src
);
9323 append_to_statement_list (t
, &list
);
/* Second pass: copy shared var pointers and copy construct non-VLA
   firstprivate vars.  */
9328 for (c
= gimple_omp_task_clauses (task_stmt
); c
; c
= OMP_CLAUSE_CHAIN (c
))
9329 switch (OMP_CLAUSE_CODE (c
))
9331 case OMP_CLAUSE_SHARED
:
9332 decl
= OMP_CLAUSE_DECL (c
);
9333 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
9336 f
= (tree
) n
->value
;
9337 if (tcctx
.cb
.decl_map
)
9338 f
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, f
);
9339 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
9340 sf
= (tree
) n
->value
;
9341 if (tcctx
.cb
.decl_map
)
9342 sf
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, sf
);
9343 src
= build_simple_mem_ref_loc (loc
, sarg
);
9344 src
= omp_build_component_ref (src
, sf
);
9345 dst
= build_simple_mem_ref_loc (loc
, arg
);
9346 dst
= omp_build_component_ref (dst
, f
);
9347 t
= build2 (MODIFY_EXPR
, TREE_TYPE (dst
), dst
, src
);
9348 append_to_statement_list (t
, &list
);
9350 case OMP_CLAUSE_FIRSTPRIVATE
:
9351 decl
= OMP_CLAUSE_DECL (c
);
9352 if (is_variable_sized (decl
))
9354 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
9357 f
= (tree
) n
->value
;
9358 if (tcctx
.cb
.decl_map
)
9359 f
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, f
);
9360 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
9363 sf
= (tree
) n
->value
;
9364 if (tcctx
.cb
.decl_map
)
9365 sf
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, sf
);
9366 src
= build_simple_mem_ref_loc (loc
, sarg
);
9367 src
= omp_build_component_ref (src
, sf
);
9368 if (use_pointer_for_field (decl
, NULL
) || is_reference (decl
))
9369 src
= build_simple_mem_ref_loc (loc
, src
);
9373 dst
= build_simple_mem_ref_loc (loc
, arg
);
9374 dst
= omp_build_component_ref (dst
, f
);
9375 t
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, dst
, src
);
9376 append_to_statement_list (t
, &list
);
9378 case OMP_CLAUSE_PRIVATE
:
9379 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
9381 decl
= OMP_CLAUSE_DECL (c
);
9382 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
9383 f
= (tree
) n
->value
;
9384 if (tcctx
.cb
.decl_map
)
9385 f
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, f
);
9386 n
= splay_tree_lookup (ctx
->sfield_map
, (splay_tree_key
) decl
);
9389 sf
= (tree
) n
->value
;
9390 if (tcctx
.cb
.decl_map
)
9391 sf
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, sf
);
9392 src
= build_simple_mem_ref_loc (loc
, sarg
);
9393 src
= omp_build_component_ref (src
, sf
);
9394 if (use_pointer_for_field (decl
, NULL
))
9395 src
= build_simple_mem_ref_loc (loc
, src
);
9399 dst
= build_simple_mem_ref_loc (loc
, arg
);
9400 dst
= omp_build_component_ref (dst
, f
);
9401 t
= build2 (MODIFY_EXPR
, TREE_TYPE (dst
), dst
, src
);
9402 append_to_statement_list (t
, &list
);
9408 /* Last pass: handle VLA firstprivates. */
9409 if (tcctx
.cb
.decl_map
)
9410 for (c
= gimple_omp_task_clauses (task_stmt
); c
; c
= OMP_CLAUSE_CHAIN (c
))
9411 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_FIRSTPRIVATE
)
9415 decl
= OMP_CLAUSE_DECL (c
);
9416 if (!is_variable_sized (decl
))
9418 n
= splay_tree_lookup (ctx
->field_map
, (splay_tree_key
) decl
);
9421 f
= (tree
) n
->value
;
9422 f
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, f
);
9423 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl
));
9424 ind
= DECL_VALUE_EXPR (decl
);
9425 gcc_assert (TREE_CODE (ind
) == INDIRECT_REF
);
9426 gcc_assert (DECL_P (TREE_OPERAND (ind
, 0)));
9427 n
= splay_tree_lookup (ctx
->sfield_map
,
9428 (splay_tree_key
) TREE_OPERAND (ind
, 0));
9429 sf
= (tree
) n
->value
;
9430 sf
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, sf
);
9431 src
= build_simple_mem_ref_loc (loc
, sarg
);
9432 src
= omp_build_component_ref (src
, sf
);
9433 src
= build_simple_mem_ref_loc (loc
, src
);
9434 dst
= build_simple_mem_ref_loc (loc
, arg
);
9435 dst
= omp_build_component_ref (dst
, f
);
9436 t
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, dst
, src
);
9437 append_to_statement_list (t
, &list
);
9438 n
= splay_tree_lookup (ctx
->field_map
,
9439 (splay_tree_key
) TREE_OPERAND (ind
, 0));
9440 df
= (tree
) n
->value
;
9441 df
= *(tree
*) pointer_map_contains (tcctx
.cb
.decl_map
, df
);
9442 ptr
= build_simple_mem_ref_loc (loc
, arg
);
9443 ptr
= omp_build_component_ref (ptr
, df
);
9444 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ptr
), ptr
,
9445 build_fold_addr_expr_loc (loc
, dst
));
9446 append_to_statement_list (t
, &list
);
9449 t
= build1 (RETURN_EXPR
, void_type_node
, NULL
);
9450 append_to_statement_list (t
, &list
);
9452 if (tcctx
.cb
.decl_map
)
9453 pointer_map_destroy (tcctx
.cb
.decl_map
);
9454 pop_gimplify_context (NULL
);
9455 BIND_EXPR_BODY (bind
) = list
;
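/* Lower "depend" clauses on a task into an artificial address array
   (a sketch of the layout, as built below):

     array[0]    = total number of depend addresses (in + out/inout)
     array[1]    = number of out/inout addresses
     array[2...] = the out/inout addresses, followed by the in addresses

   The array's address is prepended to the task's clause chain as a new
   OMP_CLAUSE_DEPEND, and the array is clobbered after the task.  */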
9460 lower_depend_clauses (gimple stmt
, gimple_seq
*iseq
, gimple_seq
*oseq
)
9464 size_t n_in
= 0, n_out
= 0, idx
= 2, i
;
9466 clauses
= find_omp_clause (gimple_omp_task_clauses (stmt
),
9468 gcc_assert (clauses
);
9469 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9470 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_DEPEND
)
9471 switch (OMP_CLAUSE_DEPEND_KIND (c
))
9473 case OMP_CLAUSE_DEPEND_IN
:
9476 case OMP_CLAUSE_DEPEND_OUT
:
9477 case OMP_CLAUSE_DEPEND_INOUT
:
9483 tree type
= build_array_type_nelts (ptr_type_node
, n_in
+ n_out
+ 2);
9484 tree array
= create_tmp_var (type
, NULL
);
9485 tree r
= build4 (ARRAY_REF
, ptr_type_node
, array
, size_int (0), NULL_TREE
,
9487 g
= gimple_build_assign (r
, build_int_cst (ptr_type_node
, n_in
+ n_out
));
9488 gimple_seq_add_stmt (iseq
, g
);
9489 r
= build4 (ARRAY_REF
, ptr_type_node
, array
, size_int (1), NULL_TREE
,
9491 g
= gimple_build_assign (r
, build_int_cst (ptr_type_node
, n_out
));
9492 gimple_seq_add_stmt (iseq
, g
);
9493 for (i
= 0; i
< 2; i
++)
9495 if ((i
? n_in
: n_out
) == 0)
9497 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9498 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_DEPEND
9499 && ((OMP_CLAUSE_DEPEND_KIND (c
) != OMP_CLAUSE_DEPEND_IN
) ^ i
))
9501 tree t
= OMP_CLAUSE_DECL (c
);
9502 t
= fold_convert (ptr_type_node
, t
);
9503 gimplify_expr (&t
, iseq
, NULL
, is_gimple_val
, fb_rvalue
);
9504 r
= build4 (ARRAY_REF
, ptr_type_node
, array
, size_int (idx
++),
9505 NULL_TREE
, NULL_TREE
);
9506 g
= gimple_build_assign (r
, t
);
9507 gimple_seq_add_stmt (iseq
, g
);
9510 tree
*p
= gimple_omp_task_clauses_ptr (stmt
);
9511 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE_DEPEND
);
9512 OMP_CLAUSE_DECL (c
) = build_fold_addr_expr (array
);
9513 OMP_CLAUSE_CHAIN (c
) = *p
;
9515 tree clobber
= build_constructor (type
, NULL
);
9516 TREE_THIS_VOLATILE (clobber
) = 1;
9517 g
= gimple_build_assign (array
, clobber
);
9518 gimple_seq_add_stmt (oseq
, g
);
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
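/* A rough sketch of the result for the case where a sender record
   .omp_data_o is needed (this is an illustration, not the exact GIMPLE):

     <ilist: fill .omp_data_o>
     GIMPLE_OMP_PARALLEL/TASK <clauses, data_arg = .omp_data_o>
       {
	 .omp_data_i = &.omp_data_o;   <- rewritten when the body is outlined
	 <par_ilist> <lowered body> <reductions> <par_olist>
	 GIMPLE_OMP_RETURN
       }
     <olist: copy-back, clobber of .omp_data_o>  */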
9525 lower_omp_taskreg (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
9529 gimple stmt
= gsi_stmt (*gsi_p
);
9530 gimple par_bind
, bind
, dep_bind
= NULL
;
9531 gimple_seq par_body
, olist
, ilist
, par_olist
, par_rlist
, par_ilist
, new_body
;
9532 location_t loc
= gimple_location (stmt
);
9534 clauses
= gimple_omp_taskreg_clauses (stmt
);
9535 par_bind
= gimple_seq_first_stmt (gimple_omp_body (stmt
));
9536 par_body
= gimple_bind_body (par_bind
);
9537 child_fn
= ctx
->cb
.dst_fn
;
9538 if (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
9539 && !gimple_omp_parallel_combined_p (stmt
))
9541 struct walk_stmt_info wi
;
9544 memset (&wi
, 0, sizeof (wi
));
9547 walk_gimple_seq (par_body
, check_combined_parallel
, NULL
, &wi
);
9549 gimple_omp_parallel_set_combined_p (stmt
, true);
9551 gimple_seq dep_ilist
= NULL
;
9552 gimple_seq dep_olist
= NULL
;
9553 if (gimple_code (stmt
) == GIMPLE_OMP_TASK
9554 && find_omp_clause (clauses
, OMP_CLAUSE_DEPEND
))
9556 push_gimplify_context ();
9557 dep_bind
= gimple_build_bind (NULL
, NULL
, make_node (BLOCK
));
9558 lower_depend_clauses (stmt
, &dep_ilist
, &dep_olist
);
9561 if (ctx
->srecord_type
)
9562 create_task_copyfn (stmt
, ctx
);
9564 push_gimplify_context ();
9569 lower_rec_input_clauses (clauses
, &par_ilist
, &par_olist
, ctx
, NULL
);
9570 lower_omp (&par_body
, ctx
);
9571 if (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
)
9572 lower_reduction_clauses (clauses
, &par_rlist
, ctx
);
/* Declare all the variables created by mapping and the variables
   declared in the scope of the parallel body.  */
9576 record_vars_into (ctx
->block_vars
, child_fn
);
9577 record_vars_into (gimple_bind_vars (par_bind
), child_fn
);
9579 if (ctx
->record_type
)
9582 = create_tmp_var (ctx
->srecord_type
? ctx
->srecord_type
9583 : ctx
->record_type
, ".omp_data_o");
9584 DECL_NAMELESS (ctx
->sender_decl
) = 1;
9585 TREE_ADDRESSABLE (ctx
->sender_decl
) = 1;
9586 gimple_omp_taskreg_set_data_arg (stmt
, ctx
->sender_decl
);
9591 lower_send_clauses (clauses
, &ilist
, &olist
, ctx
);
9592 lower_send_shared_vars (&ilist
, &olist
, ctx
);
9594 if (ctx
->record_type
)
9596 tree clobber
= build_constructor (TREE_TYPE (ctx
->sender_decl
), NULL
);
9597 TREE_THIS_VOLATILE (clobber
) = 1;
9598 gimple_seq_add_stmt (&olist
, gimple_build_assign (ctx
->sender_decl
,
/* Once all the expansions are done, sequence all the different
   fragments inside gimple_omp_body.  */
9607 if (ctx
->record_type
)
9609 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
9610 /* fixup_child_record_type might have changed receiver_decl's type. */
9611 t
= fold_convert_loc (loc
, TREE_TYPE (ctx
->receiver_decl
), t
);
9612 gimple_seq_add_stmt (&new_body
,
9613 gimple_build_assign (ctx
->receiver_decl
, t
));
9616 gimple_seq_add_seq (&new_body
, par_ilist
);
9617 gimple_seq_add_seq (&new_body
, par_body
);
9618 gimple_seq_add_seq (&new_body
, par_rlist
);
9619 if (ctx
->cancellable
)
9620 gimple_seq_add_stmt (&new_body
, gimple_build_label (ctx
->cancel_label
));
9621 gimple_seq_add_seq (&new_body
, par_olist
);
9622 new_body
= maybe_catch_exception (new_body
);
9623 gimple_seq_add_stmt (&new_body
, gimple_build_omp_return (false));
9624 gimple_omp_set_body (stmt
, new_body
);
9626 bind
= gimple_build_bind (NULL
, NULL
, gimple_bind_block (par_bind
));
9627 gsi_replace (gsi_p
, dep_bind
? dep_bind
: bind
, true);
9628 gimple_bind_add_seq (bind
, ilist
);
9629 gimple_bind_add_stmt (bind
, stmt
);
9630 gimple_bind_add_seq (bind
, olist
);
9632 pop_gimplify_context (NULL
);
9636 gimple_bind_add_seq (dep_bind
, dep_ilist
);
9637 gimple_bind_add_stmt (dep_bind
, bind
);
9638 gimple_bind_add_seq (dep_bind
, dep_olist
);
9639 pop_gimplify_context (dep_bind
);
/* Lower the OpenMP target directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
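/* A rough sketch of the data-sharing setup built below (array names are
   illustrative): for every mapped variable an entry is appended to three
   parallel arrays, and the triple is recorded as the directive's data
   argument:

     .omp_data_arr[i]   = host address of the i-th mapping
     .omp_data_sizes[i] = its size in bytes
     .omp_data_kinds[i] = map kind | (log2 of its alignment << 3)  */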
9647 lower_omp_target (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
9650 tree child_fn
, t
, c
;
9651 gimple stmt
= gsi_stmt (*gsi_p
);
9652 gimple tgt_bind
= NULL
, bind
;
9653 gimple_seq tgt_body
= NULL
, olist
, ilist
, new_body
;
9654 location_t loc
= gimple_location (stmt
);
9655 int kind
= gimple_omp_target_kind (stmt
);
9656 unsigned int map_cnt
= 0;
9658 clauses
= gimple_omp_target_clauses (stmt
);
9659 if (kind
== GF_OMP_TARGET_KIND_REGION
)
9661 tgt_bind
= gimple_seq_first_stmt (gimple_omp_body (stmt
));
9662 tgt_body
= gimple_bind_body (tgt_bind
);
9664 else if (kind
== GF_OMP_TARGET_KIND_DATA
)
9665 tgt_body
= gimple_omp_body (stmt
);
9666 child_fn
= ctx
->cb
.dst_fn
;
9668 push_gimplify_context ();
9670 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9671 switch (OMP_CLAUSE_CODE (c
))
9677 case OMP_CLAUSE_MAP
:
9679 case OMP_CLAUSE_FROM
:
9680 var
= OMP_CLAUSE_DECL (c
);
9683 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_MAP
9684 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
))
9690 && TREE_CODE (DECL_SIZE (var
)) != INTEGER_CST
)
9692 tree var2
= DECL_VALUE_EXPR (var
);
9693 gcc_assert (TREE_CODE (var2
) == INDIRECT_REF
);
9694 var2
= TREE_OPERAND (var2
, 0);
9695 gcc_assert (DECL_P (var2
));
9699 if (!maybe_lookup_field (var
, ctx
))
9702 if (kind
== GF_OMP_TARGET_KIND_REGION
)
9704 x
= build_receiver_ref (var
, true, ctx
);
9705 tree new_var
= lookup_decl (var
, ctx
);
9706 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
9707 && OMP_CLAUSE_MAP_KIND (c
) == OMP_CLAUSE_MAP_POINTER
9708 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
)
9709 && TREE_CODE (TREE_TYPE (var
)) == ARRAY_TYPE
)
9710 x
= build_simple_mem_ref (x
);
9711 SET_DECL_VALUE_EXPR (new_var
, x
);
9712 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
9717 if (kind
== GF_OMP_TARGET_KIND_REGION
)
9719 target_nesting_level
++;
9720 lower_omp (&tgt_body
, ctx
);
9721 target_nesting_level
--;
9723 else if (kind
== GF_OMP_TARGET_KIND_DATA
)
9724 lower_omp (&tgt_body
, ctx
);
9726 if (kind
== GF_OMP_TARGET_KIND_REGION
)
/* Declare all the variables created by mapping and the variables
   declared in the scope of the target body.  */
9730 record_vars_into (ctx
->block_vars
, child_fn
);
9731 record_vars_into (gimple_bind_vars (tgt_bind
), child_fn
);
9736 if (ctx
->record_type
)
9739 = create_tmp_var (ctx
->record_type
, ".omp_data_arr");
9740 DECL_NAMELESS (ctx
->sender_decl
) = 1;
9741 TREE_ADDRESSABLE (ctx
->sender_decl
) = 1;
9742 t
= make_tree_vec (3);
9743 TREE_VEC_ELT (t
, 0) = ctx
->sender_decl
;
9745 = create_tmp_var (build_array_type_nelts (size_type_node
, map_cnt
),
9747 DECL_NAMELESS (TREE_VEC_ELT (t
, 1)) = 1;
9748 TREE_ADDRESSABLE (TREE_VEC_ELT (t
, 1)) = 1;
9749 TREE_STATIC (TREE_VEC_ELT (t
, 1)) = 1;
9751 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node
,
9754 DECL_NAMELESS (TREE_VEC_ELT (t
, 2)) = 1;
9755 TREE_ADDRESSABLE (TREE_VEC_ELT (t
, 2)) = 1;
9756 TREE_STATIC (TREE_VEC_ELT (t
, 2)) = 1;
9757 gimple_omp_target_set_data_arg (stmt
, t
);
9759 vec
<constructor_elt
, va_gc
> *vsize
;
9760 vec
<constructor_elt
, va_gc
> *vkind
;
9761 vec_alloc (vsize
, map_cnt
);
9762 vec_alloc (vkind
, map_cnt
);
9763 unsigned int map_idx
= 0;
9765 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
9766 switch (OMP_CLAUSE_CODE (c
))
9772 case OMP_CLAUSE_MAP
:
9774 case OMP_CLAUSE_FROM
:
9776 ovar
= OMP_CLAUSE_DECL (c
);
9779 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
9780 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
))
9782 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c
))
9783 == get_base_address (ovar
));
9784 nc
= OMP_CLAUSE_CHAIN (c
);
9785 ovar
= OMP_CLAUSE_DECL (nc
);
9789 tree x
= build_sender_ref (ovar
, ctx
);
9791 = build_fold_addr_expr_with_type (ovar
, ptr_type_node
);
9792 gimplify_assign (x
, v
, &ilist
);
9798 if (DECL_SIZE (ovar
)
9799 && TREE_CODE (DECL_SIZE (ovar
)) != INTEGER_CST
)
9801 tree ovar2
= DECL_VALUE_EXPR (ovar
);
9802 gcc_assert (TREE_CODE (ovar2
) == INDIRECT_REF
);
9803 ovar2
= TREE_OPERAND (ovar2
, 0);
9804 gcc_assert (DECL_P (ovar2
));
9807 if (!maybe_lookup_field (ovar
, ctx
))
9813 tree var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
9814 tree x
= build_sender_ref (ovar
, ctx
);
9815 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_MAP
9816 && OMP_CLAUSE_MAP_KIND (c
) == OMP_CLAUSE_MAP_POINTER
9817 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c
)
9818 && TREE_CODE (TREE_TYPE (ovar
)) == ARRAY_TYPE
)
9820 gcc_assert (kind
== GF_OMP_TARGET_KIND_REGION
);
9822 = create_tmp_var (TREE_TYPE (TREE_TYPE (x
)), NULL
);
9823 mark_addressable (avar
);
9824 gimplify_assign (avar
, build_fold_addr_expr (var
), &ilist
);
9825 avar
= build_fold_addr_expr (avar
);
9826 gimplify_assign (x
, avar
, &ilist
);
9828 else if (is_gimple_reg (var
))
9830 gcc_assert (kind
== GF_OMP_TARGET_KIND_REGION
);
9831 tree avar
= create_tmp_var (TREE_TYPE (var
), NULL
);
9832 mark_addressable (avar
);
9833 if (OMP_CLAUSE_MAP_KIND (c
) != OMP_CLAUSE_MAP_ALLOC
9834 && OMP_CLAUSE_MAP_KIND (c
) != OMP_CLAUSE_MAP_FROM
)
9835 gimplify_assign (avar
, var
, &ilist
);
9836 avar
= build_fold_addr_expr (avar
);
9837 gimplify_assign (x
, avar
, &ilist
);
9838 if ((OMP_CLAUSE_MAP_KIND (c
) == OMP_CLAUSE_MAP_FROM
9839 || OMP_CLAUSE_MAP_KIND (c
) == OMP_CLAUSE_MAP_TOFROM
)
9840 && !TYPE_READONLY (TREE_TYPE (var
)))
9842 x
= build_sender_ref (ovar
, ctx
);
9843 x
= build_simple_mem_ref (x
);
9844 gimplify_assign (var
, x
, &olist
);
9849 var
= build_fold_addr_expr (var
);
9850 gimplify_assign (x
, var
, &ilist
);
9853 tree s
= OMP_CLAUSE_SIZE (c
);
9855 s
= TYPE_SIZE_UNIT (TREE_TYPE (ovar
));
9856 s
= fold_convert (size_type_node
, s
);
9857 tree purpose
= size_int (map_idx
++);
9858 CONSTRUCTOR_APPEND_ELT (vsize
, purpose
, s
);
9859 if (TREE_CODE (s
) != INTEGER_CST
)
9860 TREE_STATIC (TREE_VEC_ELT (t
, 1)) = 0;
9862 unsigned char tkind
= 0;
9863 switch (OMP_CLAUSE_CODE (c
))
9865 case OMP_CLAUSE_MAP
:
9866 tkind
= OMP_CLAUSE_MAP_KIND (c
);
9869 tkind
= OMP_CLAUSE_MAP_TO
;
9871 case OMP_CLAUSE_FROM
:
9872 tkind
= OMP_CLAUSE_MAP_FROM
;
9877 unsigned int talign
= TYPE_ALIGN_UNIT (TREE_TYPE (ovar
));
9878 if (DECL_P (ovar
) && DECL_ALIGN_UNIT (ovar
) > talign
)
9879 talign
= DECL_ALIGN_UNIT (ovar
);
9880 talign
= ceil_log2 (talign
);
9881 tkind
|= talign
<< 3;
9882 CONSTRUCTOR_APPEND_ELT (vkind
, purpose
,
9883 build_int_cst (unsigned_char_type_node
,
9889 gcc_assert (map_idx
== map_cnt
);
9891 DECL_INITIAL (TREE_VEC_ELT (t
, 1))
9892 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t
, 1)), vsize
);
9893 DECL_INITIAL (TREE_VEC_ELT (t
, 2))
9894 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t
, 2)), vkind
);
9895 if (!TREE_STATIC (TREE_VEC_ELT (t
, 1)))
9897 gimple_seq initlist
= NULL
;
9898 force_gimple_operand (build1 (DECL_EXPR
, void_type_node
,
9899 TREE_VEC_ELT (t
, 1)),
9900 &initlist
, true, NULL_TREE
);
9901 gimple_seq_add_seq (&ilist
, initlist
);
9903 tree clobber
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t
, 1)),
9905 TREE_THIS_VOLATILE (clobber
) = 1;
9906 gimple_seq_add_stmt (&olist
,
9907 gimple_build_assign (TREE_VEC_ELT (t
, 1),
9911 tree clobber
= build_constructor (ctx
->record_type
, NULL
);
9912 TREE_THIS_VOLATILE (clobber
) = 1;
9913 gimple_seq_add_stmt (&olist
, gimple_build_assign (ctx
->sender_decl
,
/* Once all the expansions are done, sequence all the different
   fragments inside gimple_omp_body.  */
9922 if (ctx
->record_type
&& kind
== GF_OMP_TARGET_KIND_REGION
)
9924 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
9925 /* fixup_child_record_type might have changed receiver_decl's type. */
9926 t
= fold_convert_loc (loc
, TREE_TYPE (ctx
->receiver_decl
), t
);
9927 gimple_seq_add_stmt (&new_body
,
9928 gimple_build_assign (ctx
->receiver_decl
, t
));
9931 if (kind
== GF_OMP_TARGET_KIND_REGION
)
9933 gimple_seq_add_seq (&new_body
, tgt_body
);
9934 new_body
= maybe_catch_exception (new_body
);
9936 else if (kind
== GF_OMP_TARGET_KIND_DATA
)
9937 new_body
= tgt_body
;
9938 if (kind
!= GF_OMP_TARGET_KIND_UPDATE
)
9940 gimple_seq_add_stmt (&new_body
, gimple_build_omp_return (false));
9941 gimple_omp_set_body (stmt
, new_body
);
9944 bind
= gimple_build_bind (NULL
, NULL
,
9945 tgt_bind
? gimple_bind_block (tgt_bind
)
9947 gsi_replace (gsi_p
, bind
, true);
9948 gimple_bind_add_seq (bind
, ilist
);
9949 gimple_bind_add_stmt (bind
, stmt
);
9950 gimple_bind_add_seq (bind
, olist
);
9952 pop_gimplify_context (NULL
);
/* Expand code for an OpenMP teams directive.  */
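/* A rough sketch of the bind built below:

     <gimplified NUM_TEAMS / THREAD_LIMIT operands, privatization setup>
     GIMPLE_OMP_TEAMS <clauses>
     GOMP_teams (num_teams, thread_limit);
     <lowered teams body> <reductions> <destructors>
     GIMPLE_OMP_RETURN  */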
9958 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9960 gimple teams_stmt = gsi_stmt (*gsi_p);
9961 push_gimplify_context ();
9963 tree block = make_node (BLOCK);
9964 gimple bind = gimple_build_bind (NULL, NULL, block);
9965 gsi_replace (gsi_p, bind, true);
9966 gimple_seq bind_body = NULL;
9967 gimple_seq dlist = NULL;
9968 gimple_seq olist = NULL;
9970 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9971 OMP_CLAUSE_NUM_TEAMS);
9972 if (num_teams == NULL_TREE)
9973 num_teams = build_int_cst (unsigned_type_node, 0);
9976 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
9977 num_teams = fold_convert (unsigned_type_node, num_teams);
9978 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
9980 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9981 OMP_CLAUSE_THREAD_LIMIT);
9982 if (thread_limit == NULL_TREE)
9983 thread_limit = build_int_cst (unsigned_type_node, 0);
9986 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
9987 thread_limit = fold_convert (unsigned_type_node, thread_limit);
9988 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
9992 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
9993 &bind_body, &dlist, ctx, NULL);
9994 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
9995 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
9996 gimple_seq_add_stmt (&bind_body, teams_stmt);
9998 location_t loc = gimple_location (teams_stmt);
9999 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10000 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10001 gimple_set_location (call, loc);
10002 gimple_seq_add_stmt (&bind_body, call);
10004 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10005 gimple_omp_set_body (teams_stmt, NULL);
10006 gimple_seq_add_seq (&bind_body, olist);
10007 gimple_seq_add_seq (&bind_body, dlist);
10008 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10009 gimple_bind_set_body (bind, bind_body);
10011 pop_gimplify_context (bind);
10013 gimple_bind_append_vars (bind, ctx->block_vars);
10014 BLOCK_VARS (block) = ctx->block_vars;
10015 if (BLOCK_VARS (block))
10016 TREE_USED (block) = 1;
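/* Rough example of the result (illustrative only): for

     #pragma omp teams num_teams (4) thread_limit (8)
     { body }

   lower_omp_teams wraps the region in a GIMPLE_BIND whose body is
   approximately

     GIMPLE_OMP_TEAMS
     __builtin_GOMP_teams (4, 8);
     { lowered body }
     GIMPLE_OMP_RETURN (nowait)

   with omitted clauses defaulting to 0, which the runtime treats as
   "implementation defined".  */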
10020 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10021 regimplified. If DATA is non-NULL, lower_omp_1 is outside
10022 of OpenMP context, but with task_shared_vars set. */
10025 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10030 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10031 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10034 if (task_shared_vars
10036 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10039 /* If a global variable has been privatized, TREE_CONSTANT on
10040 ADDR_EXPR might be wrong. */
10041 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10042 recompute_tree_invariant_for_addr_expr (t);
10044 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10049 lower_omp_1 (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
10051 gimple stmt
= gsi_stmt (*gsi_p
);
10052 struct walk_stmt_info wi
;
10054 if (gimple_has_location (stmt
))
10055 input_location
= gimple_location (stmt
);
10057 if (task_shared_vars
)
10058 memset (&wi
, '\0', sizeof (wi
));
10060 /* If we have issued syntax errors, avoid doing any heavy lifting.
10061 Just replace the OpenMP directives with a NOP to avoid
10062 confusing RTL expansion. */
10063 if (seen_error () && is_gimple_omp (stmt
))
10065 gsi_replace (gsi_p
, gimple_build_nop (), true);
10069 switch (gimple_code (stmt
))
10072 if ((ctx
|| task_shared_vars
)
10073 && (walk_tree (gimple_cond_lhs_ptr (stmt
), lower_omp_regimplify_p
,
10074 ctx
? NULL
: &wi
, NULL
)
10075 || walk_tree (gimple_cond_rhs_ptr (stmt
), lower_omp_regimplify_p
,
10076 ctx
? NULL
: &wi
, NULL
)))
10077 gimple_regimplify_operands (stmt
, gsi_p
);
10080 lower_omp (gimple_catch_handler_ptr (stmt
), ctx
);
10082 case GIMPLE_EH_FILTER
:
10083 lower_omp (gimple_eh_filter_failure_ptr (stmt
), ctx
);
10086 lower_omp (gimple_try_eval_ptr (stmt
), ctx
);
10087 lower_omp (gimple_try_cleanup_ptr (stmt
), ctx
);
10089 case GIMPLE_TRANSACTION
:
10090 lower_omp (gimple_transaction_body_ptr (stmt
), ctx
);
10093 lower_omp (gimple_bind_body_ptr (stmt
), ctx
);
10095 case GIMPLE_OMP_PARALLEL
:
10096 case GIMPLE_OMP_TASK
:
10097 ctx
= maybe_lookup_ctx (stmt
);
10099 if (ctx
->cancellable
)
10100 ctx
->cancel_label
= create_artificial_label (UNKNOWN_LOCATION
);
10101 lower_omp_taskreg (gsi_p
, ctx
);
10103 case GIMPLE_OMP_FOR
:
10104 ctx
= maybe_lookup_ctx (stmt
);
10106 if (ctx
->cancellable
)
10107 ctx
->cancel_label
= create_artificial_label (UNKNOWN_LOCATION
);
10108 lower_omp_for (gsi_p
, ctx
);
10110 case GIMPLE_OMP_SECTIONS
:
10111 ctx
= maybe_lookup_ctx (stmt
);
10113 if (ctx
->cancellable
)
10114 ctx
->cancel_label
= create_artificial_label (UNKNOWN_LOCATION
);
10115 lower_omp_sections (gsi_p
, ctx
);
10117 case GIMPLE_OMP_SINGLE
:
10118 ctx
= maybe_lookup_ctx (stmt
);
10120 lower_omp_single (gsi_p
, ctx
);
10122 case GIMPLE_OMP_MASTER
:
10123 ctx
= maybe_lookup_ctx (stmt
);
10125 lower_omp_master (gsi_p
, ctx
);
10127 case GIMPLE_OMP_TASKGROUP
:
10128 ctx
= maybe_lookup_ctx (stmt
);
10130 lower_omp_taskgroup (gsi_p
, ctx
);
10132 case GIMPLE_OMP_ORDERED
:
10133 ctx
= maybe_lookup_ctx (stmt
);
10135 lower_omp_ordered (gsi_p
, ctx
);
10137 case GIMPLE_OMP_CRITICAL
:
10138 ctx
= maybe_lookup_ctx (stmt
);
10140 lower_omp_critical (gsi_p
, ctx
);
10142 case GIMPLE_OMP_ATOMIC_LOAD
:
10143 if ((ctx
|| task_shared_vars
)
10144 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt
),
10145 lower_omp_regimplify_p
, ctx
? NULL
: &wi
, NULL
))
10146 gimple_regimplify_operands (stmt
, gsi_p
);
10148 case GIMPLE_OMP_TARGET
:
10149 ctx
= maybe_lookup_ctx (stmt
);
10151 lower_omp_target (gsi_p
, ctx
);
10153 case GIMPLE_OMP_TEAMS
:
10154 ctx
= maybe_lookup_ctx (stmt
);
10156 lower_omp_teams (gsi_p
, ctx
);
10160 fndecl
= gimple_call_fndecl (stmt
);
10162 && DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
10163 switch (DECL_FUNCTION_CODE (fndecl
))
10165 case BUILT_IN_GOMP_BARRIER
:
10169 case BUILT_IN_GOMP_CANCEL
:
10170 case BUILT_IN_GOMP_CANCELLATION_POINT
:
10173 if (gimple_code (cctx
->stmt
) == GIMPLE_OMP_SECTION
)
10174 cctx
= cctx
->outer
;
10175 gcc_assert (gimple_call_lhs (stmt
) == NULL_TREE
);
10176 if (!cctx
->cancellable
)
10178 if (DECL_FUNCTION_CODE (fndecl
)
10179 == BUILT_IN_GOMP_CANCELLATION_POINT
)
10181 stmt
= gimple_build_nop ();
10182 gsi_replace (gsi_p
, stmt
, false);
10186 if (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_GOMP_BARRIER
)
10188 fndecl
= builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL
);
10189 gimple_call_set_fndecl (stmt
, fndecl
);
10190 gimple_call_set_fntype (stmt
, TREE_TYPE (fndecl
));
10193 lhs
= create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl
)), NULL
);
10194 gimple_call_set_lhs (stmt
, lhs
);
10195 tree fallthru_label
;
10196 fallthru_label
= create_artificial_label (UNKNOWN_LOCATION
);
10198 g
= gimple_build_label (fallthru_label
);
10199 gsi_insert_after (gsi_p
, g
, GSI_SAME_STMT
);
10200 g
= gimple_build_cond (NE_EXPR
, lhs
,
10201 fold_convert (TREE_TYPE (lhs
),
10202 boolean_false_node
),
10203 cctx
->cancel_label
, fallthru_label
);
10204 gsi_insert_after (gsi_p
, g
, GSI_SAME_STMT
);
10211 if ((ctx
|| task_shared_vars
)
10212 && walk_gimple_op (stmt
, lower_omp_regimplify_p
,
10215 /* Just remove clobbers, this should happen only if we have
10216 "privatized" local addressable variables in SIMD regions,
10217 the clobber isn't needed in that case and gimplifying address
10218 of the ARRAY_REF into a pointer and creating MEM_REF based
10219 clobber would create worse code than we get with the clobber
10221 if (gimple_clobber_p (stmt
))
10223 gsi_replace (gsi_p
, gimple_build_nop (), true);
10226 gimple_regimplify_operands (stmt
, gsi_p
);
10233 lower_omp (gimple_seq *body, omp_context *ctx)
10235 location_t saved_location = input_location;
10236 gimple_stmt_iterator gsi;
10237 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10238 lower_omp_1 (&gsi, ctx);
10239 /* During gimplification, we have not always invoked fold_stmt
10240 (gimplify.c:maybe_fold_stmt); call it now. */
10241 if (target_nesting_level)
10242 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10244 input_location = saved_location;
10247 /* Main entry point. */
10249 static unsigned int
10250 execute_lower_omp (void)
10254 /* This pass always runs, to provide PROP_gimple_lomp.
10255 But there is nothing to do unless -fopenmp is given. */
10256 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10259 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10260 delete_omp_context);
10262 body = gimple_body (current_function_decl);
10263 scan_omp (&body, NULL);
10264 gcc_assert (taskreg_nesting_level == 0);
10266 if (all_contexts->root)
10268 if (task_shared_vars)
10269 push_gimplify_context ();
10270 lower_omp (&body, NULL);
10271 if (task_shared_vars)
10272 pop_gimplify_context (NULL);
10277 splay_tree_delete (all_contexts);
10278 all_contexts = NULL;
10280 BITMAP_FREE (task_shared_vars);
10286 const pass_data pass_data_lower_omp =
10288 GIMPLE_PASS, /* type */
10289 "omplower", /* name */
10290 OPTGROUP_NONE, /* optinfo_flags */
10291 TV_NONE, /* tv_id */
10292 PROP_gimple_any, /* properties_required */
10293 PROP_gimple_lomp, /* properties_provided */
10294 0, /* properties_destroyed */
10295 0, /* todo_flags_start */
10296 0, /* todo_flags_finish */
10299 class pass_lower_omp : public gimple_opt_pass
10302 pass_lower_omp (gcc::context *ctxt)
10303 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10306 /* opt_pass methods: */
10307 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10309 }; // class pass_lower_omp
10311 } // anon namespace
10314 make_pass_lower_omp (gcc::context *ctxt)
10316 return new pass_lower_omp (ctxt);
10319 /* The following is a utility to diagnose OpenMP structured block violations.
10320 It is not part of the "omplower" pass, as that's invoked too late. It
10321 should be invoked by the respective front ends after gimplification. */
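/* For example (illustrative only), given

     goto inside;
     #pragma omp parallel
     { inside: ; }

   the two walks below record label `inside' with its enclosing
   GIMPLE_OMP_PARALLEL as context, and then report
   "invalid entry to OpenMP structured block" when the GIMPLE_GOTO is
   visited with a different (here NULL) context.  */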
10323 static splay_tree all_labels;
10325 /* Check for mismatched contexts and generate an error if needed. Return
10326 true if an error is detected. */
10329 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10330 gimple branch_ctx, gimple label_ctx)
10332 if (label_ctx == branch_ctx)
10337 Previously we kept track of the label's entire context in diagnose_sb_[12]
10338 so we could traverse it and issue a correct "exit" or "enter" error
10339 message upon a structured block violation.
10341 We built the context by building a list with tree_cons'ing, but there is
10342 no easy counterpart in gimple tuples. It seems like far too much work
10343 for issuing exit/enter error messages. If someone really misses the
10344 distinct error message... patches welcome.
10348 /* Try to avoid confusing the user by producing an error message
10349 with correct "exit" or "enter" verbiage. We prefer "exit"
10350 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10351 if (branch_ctx == NULL)
10357 if (TREE_VALUE (label_ctx) == branch_ctx)
10362 label_ctx = TREE_CHAIN (label_ctx);
10367 error ("invalid exit from OpenMP structured block");
10369 error ("invalid entry to OpenMP structured block");
10372 bool cilkplus_block = false;
10376 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10377 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10379 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10380 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10381 cilkplus_block = true;
10384 /* If it's obvious we have an invalid entry, be specific about the error. */
10385 if (branch_ctx == NULL)
10387 if (cilkplus_block)
10388 error ("invalid entry to Cilk Plus structured block");
10390 error ("invalid entry to OpenMP structured block");
10394 /* Otherwise, be vague and lazy, but efficient. */
10395 if (cilkplus_block)
10396 error ("invalid branch to/from a Cilk Plus structured block");
10398 error ("invalid branch to/from an OpenMP structured block");
10401 gsi_replace (gsi_p, gimple_build_nop (), false);
10405 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10406 where each label is found. */
10409 diagnose_sb_1 (gimple_stmt_iterator
*gsi_p
, bool *handled_ops_p
,
10410 struct walk_stmt_info
*wi
)
10412 gimple context
= (gimple
) wi
->info
;
10413 gimple inner_context
;
10414 gimple stmt
= gsi_stmt (*gsi_p
);
10416 *handled_ops_p
= true;
10418 switch (gimple_code (stmt
))
10422 case GIMPLE_OMP_PARALLEL
:
10423 case GIMPLE_OMP_TASK
:
10424 case GIMPLE_OMP_SECTIONS
:
10425 case GIMPLE_OMP_SINGLE
:
10426 case GIMPLE_OMP_SECTION
:
10427 case GIMPLE_OMP_MASTER
:
10428 case GIMPLE_OMP_ORDERED
:
10429 case GIMPLE_OMP_CRITICAL
:
10430 case GIMPLE_OMP_TARGET
:
10431 case GIMPLE_OMP_TEAMS
:
10432 case GIMPLE_OMP_TASKGROUP
:
10433 /* The minimal context here is just the current OMP construct. */
10434 inner_context
= stmt
;
10435 wi
->info
= inner_context
;
10436 walk_gimple_seq (gimple_omp_body (stmt
), diagnose_sb_1
, NULL
, wi
);
10437 wi
->info
= context
;
10440 case GIMPLE_OMP_FOR
:
10441 inner_context
= stmt
;
10442 wi
->info
= inner_context
;
10443 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10445 walk_gimple_seq (gimple_omp_for_pre_body (stmt
),
10446 diagnose_sb_1
, NULL
, wi
);
10447 walk_gimple_seq (gimple_omp_body (stmt
), diagnose_sb_1
, NULL
, wi
);
10448 wi
->info
= context
;
10452 splay_tree_insert (all_labels
, (splay_tree_key
) gimple_label_label (stmt
),
10453 (splay_tree_value
) context
);
10463 /* Pass 2: Check each branch and see if its context differs from that of
10464 the destination label's context. */
10467 diagnose_sb_2 (gimple_stmt_iterator
*gsi_p
, bool *handled_ops_p
,
10468 struct walk_stmt_info
*wi
)
10470 gimple context
= (gimple
) wi
->info
;
10472 gimple stmt
= gsi_stmt (*gsi_p
);
10474 *handled_ops_p
= true;
10476 switch (gimple_code (stmt
))
10480 case GIMPLE_OMP_PARALLEL
:
10481 case GIMPLE_OMP_TASK
:
10482 case GIMPLE_OMP_SECTIONS
:
10483 case GIMPLE_OMP_SINGLE
:
10484 case GIMPLE_OMP_SECTION
:
10485 case GIMPLE_OMP_MASTER
:
10486 case GIMPLE_OMP_ORDERED
:
10487 case GIMPLE_OMP_CRITICAL
:
10488 case GIMPLE_OMP_TARGET
:
10489 case GIMPLE_OMP_TEAMS
:
10490 case GIMPLE_OMP_TASKGROUP
:
10492 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt
), diagnose_sb_2
, NULL
, wi
);
10493 wi
->info
= context
;
10496 case GIMPLE_OMP_FOR
:
10498 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10500 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt
),
10501 diagnose_sb_2
, NULL
, wi
);
10502 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt
), diagnose_sb_2
, NULL
, wi
);
10503 wi
->info
= context
;
10508 tree lab
= gimple_cond_true_label (stmt
);
10511 n
= splay_tree_lookup (all_labels
,
10512 (splay_tree_key
) lab
);
10513 diagnose_sb_0 (gsi_p
, context
,
10514 n
? (gimple
) n
->value
: NULL
);
10516 lab
= gimple_cond_false_label (stmt
);
10519 n
= splay_tree_lookup (all_labels
,
10520 (splay_tree_key
) lab
);
10521 diagnose_sb_0 (gsi_p
, context
,
10522 n
? (gimple
) n
->value
: NULL
);
10529 tree lab
= gimple_goto_dest (stmt
);
10530 if (TREE_CODE (lab
) != LABEL_DECL
)
10533 n
= splay_tree_lookup (all_labels
, (splay_tree_key
) lab
);
10534 diagnose_sb_0 (gsi_p
, context
, n
? (gimple
) n
->value
: NULL
);
10538 case GIMPLE_SWITCH
:
10541 for (i
= 0; i
< gimple_switch_num_labels (stmt
); ++i
)
10543 tree lab
= CASE_LABEL (gimple_switch_label (stmt
, i
));
10544 n
= splay_tree_lookup (all_labels
, (splay_tree_key
) lab
);
10545 if (n
&& diagnose_sb_0 (gsi_p
, context
, (gimple
) n
->value
))
10551 case GIMPLE_RETURN
:
10552 diagnose_sb_0 (gsi_p
, context
, NULL
);
10562 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10565 make_gimple_omp_edges (basic_block bb
, struct omp_region
**region
,
10568 gimple last
= last_stmt (bb
);
10569 enum gimple_code code
= gimple_code (last
);
10570 struct omp_region
*cur_region
= *region
;
10571 bool fallthru
= false;
10575 case GIMPLE_OMP_PARALLEL
:
10576 case GIMPLE_OMP_TASK
:
10577 case GIMPLE_OMP_FOR
:
10578 case GIMPLE_OMP_SINGLE
:
10579 case GIMPLE_OMP_TEAMS
:
10580 case GIMPLE_OMP_MASTER
:
10581 case GIMPLE_OMP_TASKGROUP
:
10582 case GIMPLE_OMP_ORDERED
:
10583 case GIMPLE_OMP_CRITICAL
:
10584 case GIMPLE_OMP_SECTION
:
10585 cur_region
= new_omp_region (bb
, code
, cur_region
);
10589 case GIMPLE_OMP_TARGET
:
10590 cur_region
= new_omp_region (bb
, code
, cur_region
);
10592 if (gimple_omp_target_kind (last
) == GF_OMP_TARGET_KIND_UPDATE
)
10593 cur_region
= cur_region
->outer
;
10596 case GIMPLE_OMP_SECTIONS
:
10597 cur_region
= new_omp_region (bb
, code
, cur_region
);
10601 case GIMPLE_OMP_SECTIONS_SWITCH
:
10605 case GIMPLE_OMP_ATOMIC_LOAD
:
10606 case GIMPLE_OMP_ATOMIC_STORE
:
10610 case GIMPLE_OMP_RETURN
:
10611 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10612 somewhere other than the next block. This will be
10614 cur_region
->exit
= bb
;
10615 fallthru
= cur_region
->type
!= GIMPLE_OMP_SECTION
;
10616 cur_region
= cur_region
->outer
;
10619 case GIMPLE_OMP_CONTINUE
:
10620 cur_region
->cont
= bb
;
10621 switch (cur_region
->type
)
10623 case GIMPLE_OMP_FOR
:
10624 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10625 succs edges as abnormal to prevent splitting
10627 single_succ_edge (cur_region
->entry
)->flags
|= EDGE_ABNORMAL
;
10628 /* Make the loopback edge. */
10629 make_edge (bb
, single_succ (cur_region
->entry
),
10632 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10633 corresponds to the case that the body of the loop
10634 is not executed at all. */
10635 make_edge (cur_region
->entry
, bb
->next_bb
, EDGE_ABNORMAL
);
10636 make_edge (bb
, bb
->next_bb
, EDGE_FALLTHRU
| EDGE_ABNORMAL
);
10640 case GIMPLE_OMP_SECTIONS
:
10641 /* Wire up the edges into and out of the nested sections. */
10643 basic_block switch_bb
= single_succ (cur_region
->entry
);
10645 struct omp_region
*i
;
10646 for (i
= cur_region
->inner
; i
; i
= i
->next
)
10648 gcc_assert (i
->type
== GIMPLE_OMP_SECTION
);
10649 make_edge (switch_bb
, i
->entry
, 0);
10650 make_edge (i
->exit
, bb
, EDGE_FALLTHRU
);
10653 /* Make the loopback edge to the block with
10654 GIMPLE_OMP_SECTIONS_SWITCH. */
10655 make_edge (bb
, switch_bb
, 0);
10657 /* Make the edge from the switch to exit. */
10658 make_edge (switch_bb
, bb
->next_bb
, 0);
10664 gcc_unreachable ();
10669 gcc_unreachable ();
10672 if (*region
!= cur_region
)
10674 *region
= cur_region
;
10676 *region_idx
= cur_region
->entry
->index
;
10684 static unsigned int
10685 diagnose_omp_structured_block_errors (void)
10687 struct walk_stmt_info wi;
10688 gimple_seq body = gimple_body (current_function_decl);
10690 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
10692 memset (&wi, 0, sizeof (wi));
10693 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
10695 memset (&wi, 0, sizeof (wi));
10696 wi.want_locations = true;
10697 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
10699 gimple_set_body (current_function_decl, body);
10701 splay_tree_delete (all_labels);
10709 const pass_data pass_data_diagnose_omp_blocks =
10711 GIMPLE_PASS, /* type */
10712 "*diagnose_omp_blocks", /* name */
10713 OPTGROUP_NONE, /* optinfo_flags */
10714 TV_NONE, /* tv_id */
10715 PROP_gimple_any, /* properties_required */
10716 0, /* properties_provided */
10717 0, /* properties_destroyed */
10718 0, /* todo_flags_start */
10719 0, /* todo_flags_finish */
10722 class pass_diagnose_omp_blocks : public gimple_opt_pass
10725 pass_diagnose_omp_blocks (gcc::context *ctxt)
10726 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
10729 /* opt_pass methods: */
10730 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
10731 virtual unsigned int execute (function *)
10733 return diagnose_omp_structured_block_errors ();
10736 }; // class pass_diagnose_omp_blocks
10738 } // anon namespace
10741 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
10743 return new pass_diagnose_omp_blocks (ctxt);
10746 /* SIMD clone supporting code. */
10748 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
10749 of arguments to reserve space for. */
10751 static struct cgraph_simd_clone *
10752 simd_clone_struct_alloc (int nargs)
10754 struct cgraph_simd_clone *clone_info;
10755 size_t len = (sizeof (struct cgraph_simd_clone)
10756 + nargs * sizeof (struct cgraph_simd_clone_arg));
10757 clone_info = (struct cgraph_simd_clone *)
10758 ggc_internal_cleared_alloc (len);
10762 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
10765 simd_clone_struct_copy (struct cgraph_simd_clone *to,
10766 struct cgraph_simd_clone *from)
10768 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
10769 + ((from->nargs - from->inbranch)
10770 * sizeof (struct cgraph_simd_clone_arg))));
10773 /* Return vector of parameter types of function FNDECL. This uses
10774 TYPE_ARG_TYPES if available, otherwise falls back to the types of
10775 its DECL_ARGUMENTS. */
10778 simd_clone_vector_of_formal_parm_types (tree fndecl)
10780 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
10781 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
10782 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
10785 FOR_EACH_VEC_ELT (args, i, arg)
10786 args[i] = TREE_TYPE (args[i]);
10790 /* Given a simd function in NODE, extract the simd specific
10791 information from the OMP clauses passed in CLAUSES, and return
10792 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
10793 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
10794 otherwise set to FALSE. */
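/* A rough example (illustrative only):

     #pragma omp declare simd simdlen(8) uniform(b) linear(c:1) notinbranch
     int foo (int a, int b, int c);

   would yield clone_info->simdlen == 8, clone_info->inbranch == 0 with
   *INBRANCH_SPECIFIED set to true, args[1] marked
   SIMD_CLONE_ARG_TYPE_UNIFORM and args[2] marked
   SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with linear_step 1.  */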
10796 static struct cgraph_simd_clone
*
10797 simd_clone_clauses_extract (struct cgraph_node
*node
, tree clauses
,
10798 bool *inbranch_specified
)
10800 vec
<tree
> args
= simd_clone_vector_of_formal_parm_types (node
->decl
);
10803 *inbranch_specified
= false;
10805 n
= args
.length ();
10806 if (n
> 0 && args
.last () == void_type_node
)
10809 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
10810 be cloned have a distinctive artificial label in addition to "omp
10814 && lookup_attribute ("cilk simd function",
10815 DECL_ATTRIBUTES (node
->decl
)));
10817 /* Allocate one more than needed just in case this is an in-branch
10818 clone which will require a mask argument. */
10819 struct cgraph_simd_clone
*clone_info
= simd_clone_struct_alloc (n
+ 1);
10820 clone_info
->nargs
= n
;
10821 clone_info
->cilk_elemental
= cilk_clone
;
10828 clauses
= TREE_VALUE (clauses
);
10829 if (!clauses
|| TREE_CODE (clauses
) != OMP_CLAUSE
)
10832 for (t
= clauses
; t
; t
= OMP_CLAUSE_CHAIN (t
))
10834 switch (OMP_CLAUSE_CODE (t
))
10836 case OMP_CLAUSE_INBRANCH
:
10837 clone_info
->inbranch
= 1;
10838 *inbranch_specified
= true;
10840 case OMP_CLAUSE_NOTINBRANCH
:
10841 clone_info
->inbranch
= 0;
10842 *inbranch_specified
= true;
10844 case OMP_CLAUSE_SIMDLEN
:
10845 clone_info
->simdlen
10846 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t
));
10848 case OMP_CLAUSE_LINEAR
:
10850 tree decl
= OMP_CLAUSE_DECL (t
);
10851 tree step
= OMP_CLAUSE_LINEAR_STEP (t
);
10852 int argno
= TREE_INT_CST_LOW (decl
);
10853 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t
))
10855 clone_info
->args
[argno
].arg_type
10856 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
;
10857 clone_info
->args
[argno
].linear_step
= tree_to_shwi (step
);
10858 gcc_assert (clone_info
->args
[argno
].linear_step
>= 0
10859 && clone_info
->args
[argno
].linear_step
< n
);
10863 if (POINTER_TYPE_P (args
[argno
]))
10864 step
= fold_convert (ssizetype
, step
);
10865 if (!tree_fits_shwi_p (step
))
10867 warning_at (OMP_CLAUSE_LOCATION (t
), 0,
10868 "ignoring large linear step");
10872 else if (integer_zerop (step
))
10874 warning_at (OMP_CLAUSE_LOCATION (t
), 0,
10875 "ignoring zero linear step");
10881 clone_info
->args
[argno
].arg_type
10882 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
;
10883 clone_info
->args
[argno
].linear_step
= tree_to_shwi (step
);
10888 case OMP_CLAUSE_UNIFORM
:
10890 tree decl
= OMP_CLAUSE_DECL (t
);
10891 int argno
= tree_to_uhwi (decl
);
10892 clone_info
->args
[argno
].arg_type
10893 = SIMD_CLONE_ARG_TYPE_UNIFORM
;
10896 case OMP_CLAUSE_ALIGNED
:
10898 tree decl
= OMP_CLAUSE_DECL (t
);
10899 int argno
= tree_to_uhwi (decl
);
10900 clone_info
->args
[argno
].alignment
10901 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t
));
10912 /* Given a SIMD clone in NODE, calculate the characteristic data
10913 type and return the corresponding type. The characteristic data
10914 type is computed as described in the Intel Vector ABI. */
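/* For instance (illustrative only): for

     #pragma omp declare simd
     float foo (float x);

   rule a) below yields float, the return type, whereas for

     #pragma omp declare simd uniform(s)
     void bar (float *p, int s);

   rule b) picks the type of the first non-uniform, non-linear parameter,
   here the pointer type of `p'.  */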
10917 simd_clone_compute_base_data_type (struct cgraph_node
*node
,
10918 struct cgraph_simd_clone
*clone_info
)
10920 tree type
= integer_type_node
;
10921 tree fndecl
= node
->decl
;
10923 /* a) For non-void function, the characteristic data type is the
10925 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl
))) != VOID_TYPE
)
10926 type
= TREE_TYPE (TREE_TYPE (fndecl
));
10928 /* b) If the function has any non-uniform, non-linear parameters,
10929 then the characteristic data type is the type of the first
10933 vec
<tree
> map
= simd_clone_vector_of_formal_parm_types (fndecl
);
10934 for (unsigned int i
= 0; i
< clone_info
->nargs
; ++i
)
10935 if (clone_info
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_VECTOR
)
10943 /* c) If the characteristic data type determined by a) or b) above
10944 is struct, union, or class type which is pass-by-value (except
10945 for the type that maps to the built-in complex data type), the
10946 characteristic data type is int. */
10947 if (RECORD_OR_UNION_TYPE_P (type
)
10948 && !aggregate_value_p (type
, NULL
)
10949 && TREE_CODE (type
) != COMPLEX_TYPE
)
10950 return integer_type_node
;
10952 /* d) If none of the above three classes is applicable, the
10953 characteristic data type is int. */
10957 /* e) For Intel Xeon Phi native and offload compilation, if the
10958 resulting characteristic data type is 8-bit or 16-bit integer
10959 data type, the characteristic data type is int. */
10960 /* Well, we don't handle Xeon Phi yet. */
10964 simd_clone_mangle (struct cgraph_node
*node
,
10965 struct cgraph_simd_clone
*clone_info
)
10967 char vecsize_mangle
= clone_info
->vecsize_mangle
;
10968 char mask
= clone_info
->inbranch
? 'M' : 'N';
10969 unsigned int simdlen
= clone_info
->simdlen
;
10973 gcc_assert (vecsize_mangle
&& simdlen
);
10975 pp_string (&pp
, "_ZGV");
10976 pp_character (&pp
, vecsize_mangle
);
10977 pp_character (&pp
, mask
);
10978 pp_decimal_int (&pp
, simdlen
);
10980 for (n
= 0; n
< clone_info
->nargs
; ++n
)
10982 struct cgraph_simd_clone_arg arg
= clone_info
->args
[n
];
10984 if (arg
.arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
)
10985 pp_character (&pp
, 'u');
10986 else if (arg
.arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
10988 gcc_assert (arg
.linear_step
!= 0);
10989 pp_character (&pp
, 'l');
10990 if (arg
.linear_step
> 1)
10991 pp_unsigned_wide_integer (&pp
, arg
.linear_step
);
10992 else if (arg
.linear_step
< 0)
10994 pp_character (&pp
, 'n');
10995 pp_unsigned_wide_integer (&pp
, (-(unsigned HOST_WIDE_INT
)
10999 else if (arg
.arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
)
11001 pp_character (&pp
, 's');
11002 pp_unsigned_wide_integer (&pp
, arg
.linear_step
);
11005 pp_character (&pp
, 'v');
11008 pp_character (&pp
, 'a');
11009 pp_decimal_int (&pp
, arg
.alignment
);
11013 pp_underscore (&pp
);
11015 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node
->decl
)));
11016 const char *str
= pp_formatted_text (&pp
);
11018 /* If there already is a SIMD clone with the same mangled name, don't
11019 add another one. This can happen e.g. for
11020 #pragma omp declare simd
11021 #pragma omp declare simd simdlen(8)
11022 int foo (int, int);
11023 if the simdlen is assumed to be 8 for the first one, etc. */
11024 for (struct cgraph_node
*clone
= node
->simd_clones
; clone
;
11025 clone
= clone
->simdclone
->next_clone
)
11026 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone
->decl
)),
11030 return get_identifier (str
);
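/* A hedged example (the ISA letter is target dependent): a clone of

     #pragma omp declare simd notinbranch
     int foo (int a, int b);

   with simdlen 4 and an ISA mangled as 'b' would be named
   "_ZGVbN4vv_foo": 'N' for not-inbranch, '4' for the simdlen, one 'v'
   per plain vector argument, then '_' and the original assembler
   name.  */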
11033 /* Create a simd clone of OLD_NODE and return it. */
11035 static struct cgraph_node
*
11036 simd_clone_create (struct cgraph_node
*old_node
)
11038 struct cgraph_node
*new_node
;
11039 if (old_node
->definition
)
11041 if (!cgraph_function_with_gimple_body_p (old_node
))
11043 cgraph_get_body (old_node
);
11044 new_node
= cgraph_function_versioning (old_node
, vNULL
, NULL
, NULL
,
11045 false, NULL
, NULL
, "simdclone");
11049 tree old_decl
= old_node
->decl
;
11050 tree new_decl
= copy_node (old_node
->decl
);
11051 DECL_NAME (new_decl
) = clone_function_name (old_decl
, "simdclone");
11052 SET_DECL_ASSEMBLER_NAME (new_decl
, DECL_NAME (new_decl
));
11053 SET_DECL_RTL (new_decl
, NULL
);
11054 DECL_STATIC_CONSTRUCTOR (new_decl
) = 0;
11055 DECL_STATIC_DESTRUCTOR (new_decl
) = 0;
11057 = cgraph_copy_node_for_versioning (old_node
, new_decl
, vNULL
, NULL
);
11058 cgraph_call_function_insertion_hooks (new_node
);
11060 if (new_node
== NULL
)
11063 TREE_PUBLIC (new_node
->decl
) = TREE_PUBLIC (old_node
->decl
);
11065 /* The function cgraph_function_versioning () will force the new
11066 symbol local. Undo this, and inherit external visibility from
11068 new_node
->local
.local
= old_node
->local
.local
;
11069 new_node
->externally_visible
= old_node
->externally_visible
;
11074 /* Adjust the return type of the given function to its appropriate
11075 vector counterpart. Returns a simd array to be used throughout the
11076 function as a return value. */
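/* Sketch of the effect (illustrative numbers): for `int foo (...)' with
   simdlen 8 and a 128-bit integer vector size, veclen becomes 4, so the
   clone's return type is changed to an array of two 4-element integer
   vectors; the value handed back to the caller of this helper is then the
   DECL_RESULT viewed as an int[8] array, or a fresh "retval" simd array
   when the vector length already equals the simdlen.  */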
11079 simd_clone_adjust_return_type (struct cgraph_node
*node
)
11081 tree fndecl
= node
->decl
;
11082 tree orig_rettype
= TREE_TYPE (TREE_TYPE (fndecl
));
11083 unsigned int veclen
;
11086 /* Adjust the function return type. */
11087 if (orig_rettype
== void_type_node
)
11089 TREE_TYPE (fndecl
) = build_distinct_type_copy (TREE_TYPE (fndecl
));
11090 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl
)))
11091 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl
))))
11092 veclen
= node
->simdclone
->vecsize_int
;
11094 veclen
= node
->simdclone
->vecsize_float
;
11095 veclen
/= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl
))));
11096 if (veclen
> node
->simdclone
->simdlen
)
11097 veclen
= node
->simdclone
->simdlen
;
11098 if (veclen
== node
->simdclone
->simdlen
)
11099 TREE_TYPE (TREE_TYPE (fndecl
))
11100 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl
)),
11101 node
->simdclone
->simdlen
);
11104 t
= build_vector_type (TREE_TYPE (TREE_TYPE (fndecl
)), veclen
);
11105 t
= build_array_type_nelts (t
, node
->simdclone
->simdlen
/ veclen
);
11106 TREE_TYPE (TREE_TYPE (fndecl
)) = t
;
11108 if (!node
->definition
)
11111 t
= DECL_RESULT (fndecl
);
11112 /* Adjust the DECL_RESULT. */
11113 gcc_assert (TREE_TYPE (t
) != void_type_node
);
11114 TREE_TYPE (t
) = TREE_TYPE (TREE_TYPE (fndecl
));
11117 tree atype
= build_array_type_nelts (orig_rettype
,
11118 node
->simdclone
->simdlen
);
11119 if (veclen
!= node
->simdclone
->simdlen
)
11120 return build1 (VIEW_CONVERT_EXPR
, atype
, t
);
11122 /* Set up a SIMD array to use as the return value. */
11123 tree retval
= create_tmp_var_raw (atype
, "retval");
11124 gimple_add_tmp_var (retval
);
11128 /* Each vector argument has a corresponding array to be used locally
11129 as part of the eventual loop. Create such temporary array and
11132 PREFIX is the prefix to be used for the temporary.
11134 TYPE is the inner element type.
11136 SIMDLEN is the number of elements. */
11139 create_tmp_simd_array (const char *prefix
, tree type
, int simdlen
)
11141 tree atype
= build_array_type_nelts (type
, simdlen
);
11142 tree avar
= create_tmp_var_raw (atype
, prefix
);
11143 gimple_add_tmp_var (avar
);
11147 /* Modify the function argument types to their corresponding vector
11148 counterparts if appropriate. Also, create one array for each simd
11149 argument to be used locally when using the function arguments as
11152 NODE is the function whose arguments are to be adjusted.
11154 Returns an adjustment vector that will be filled describing how the
11155 argument types will be adjusted. */
11157 static ipa_parm_adjustment_vec
11158 simd_clone_adjust_argument_types (struct cgraph_node
*node
)
11161 ipa_parm_adjustment_vec adjustments
;
11163 if (node
->definition
)
11164 args
= ipa_get_vector_of_formal_parms (node
->decl
);
11166 args
= simd_clone_vector_of_formal_parm_types (node
->decl
);
11167 adjustments
.create (args
.length ());
11168 unsigned i
, j
, veclen
;
11169 struct ipa_parm_adjustment adj
;
11170 for (i
= 0; i
< node
->simdclone
->nargs
; ++i
)
11172 memset (&adj
, 0, sizeof (adj
));
11173 tree parm
= args
[i
];
11174 tree parm_type
= node
->definition
? TREE_TYPE (parm
) : parm
;
11175 adj
.base_index
= i
;
11178 node
->simdclone
->args
[i
].orig_arg
= node
->definition
? parm
: NULL_TREE
;
11179 node
->simdclone
->args
[i
].orig_type
= parm_type
;
11181 if (node
->simdclone
->args
[i
].arg_type
!= SIMD_CLONE_ARG_TYPE_VECTOR
)
11183 /* No adjustment necessary for scalar arguments. */
11184 adj
.op
= IPA_PARM_OP_COPY
;
11188 if (INTEGRAL_TYPE_P (parm_type
) || POINTER_TYPE_P (parm_type
))
11189 veclen
= node
->simdclone
->vecsize_int
;
11191 veclen
= node
->simdclone
->vecsize_float
;
11192 veclen
/= GET_MODE_BITSIZE (TYPE_MODE (parm_type
));
11193 if (veclen
> node
->simdclone
->simdlen
)
11194 veclen
= node
->simdclone
->simdlen
;
11195 adj
.arg_prefix
= "simd";
11196 adj
.type
= build_vector_type (parm_type
, veclen
);
11197 node
->simdclone
->args
[i
].vector_type
= adj
.type
;
11198 for (j
= veclen
; j
< node
->simdclone
->simdlen
; j
+= veclen
)
11200 adjustments
.safe_push (adj
);
11203 memset (&adj
, 0, sizeof (adj
));
11204 adj
.op
= IPA_PARM_OP_NEW
;
11205 adj
.arg_prefix
= "simd";
11206 adj
.base_index
= i
;
11207 adj
.type
= node
->simdclone
->args
[i
].vector_type
;
11211 if (node
->definition
)
11212 node
->simdclone
->args
[i
].simd_array
11213 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm
)),
11214 parm_type
, node
->simdclone
->simdlen
);
11216 adjustments
.safe_push (adj
);
11219 if (node
->simdclone
->inbranch
)
11222 = simd_clone_compute_base_data_type (node
->simdclone
->origin
,
11225 memset (&adj
, 0, sizeof (adj
));
11226 adj
.op
= IPA_PARM_OP_NEW
;
11227 adj
.arg_prefix
= "mask";
11229 adj
.base_index
= i
;
11230 if (INTEGRAL_TYPE_P (base_type
) || POINTER_TYPE_P (base_type
))
11231 veclen
= node
->simdclone
->vecsize_int
;
11233 veclen
= node
->simdclone
->vecsize_float
;
11234 veclen
/= GET_MODE_BITSIZE (TYPE_MODE (base_type
));
11235 if (veclen
> node
->simdclone
->simdlen
)
11236 veclen
= node
->simdclone
->simdlen
;
11237 adj
.type
= build_vector_type (base_type
, veclen
);
11238 adjustments
.safe_push (adj
);
11240 for (j
= veclen
; j
< node
->simdclone
->simdlen
; j
+= veclen
)
11241 adjustments
.safe_push (adj
);
11243 /* We have previously allocated one extra entry for the mask. Use
11245 struct cgraph_simd_clone
*sc
= node
->simdclone
;
11247 if (node
->definition
)
11249 sc
->args
[i
].orig_arg
11250 = build_decl (UNKNOWN_LOCATION
, PARM_DECL
, NULL
, base_type
);
11251 sc
->args
[i
].simd_array
11252 = create_tmp_simd_array ("mask", base_type
, sc
->simdlen
);
11254 sc
->args
[i
].orig_type
= base_type
;
11255 sc
->args
[i
].arg_type
= SIMD_CLONE_ARG_TYPE_MASK
;
11258 if (node
->definition
)
11259 ipa_modify_formal_parameters (node
->decl
, adjustments
);
11262 tree new_arg_types
= NULL_TREE
, new_reversed
;
11263 bool last_parm_void
= false;
11264 if (args
.length () > 0 && args
.last () == void_type_node
)
11265 last_parm_void
= true;
11267 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node
->decl
)));
11268 j
= adjustments
.length ();
11269 for (i
= 0; i
< j
; i
++)
11271 struct ipa_parm_adjustment
*adj
= &adjustments
[i
];
11273 if (adj
->op
== IPA_PARM_OP_COPY
)
11274 ptype
= args
[adj
->base_index
];
11277 new_arg_types
= tree_cons (NULL_TREE
, ptype
, new_arg_types
);
11279 new_reversed
= nreverse (new_arg_types
);
11280 if (last_parm_void
)
11283 TREE_CHAIN (new_arg_types
) = void_list_node
;
11285 new_reversed
= void_list_node
;
11288 tree new_type
= build_distinct_type_copy (TREE_TYPE (node
->decl
));
11289 TYPE_ARG_TYPES (new_type
) = new_reversed
;
11290 TREE_TYPE (node
->decl
) = new_type
;
11292 adjustments
.release ();
11295 return adjustments
;
11298 /* Initialize and copy the function arguments in NODE to their
11299 corresponding local simd arrays. Returns a fresh gimple_seq with
11300 the instruction sequence generated. */
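/* Rough illustration only: for a vector argument ARG of type V4SI when the
   clone's simdlen is also 4, the generated sequence is roughly

     *(V4SI *) &array = arg;

   i.e. the incoming vector is stored into the argument's local simd
   array; for wider simdlens one such store is emitted per incoming
   vector chunk at the appropriate byte offset.  */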
11303 simd_clone_init_simd_arrays (struct cgraph_node
*node
,
11304 ipa_parm_adjustment_vec adjustments
)
11306 gimple_seq seq
= NULL
;
11307 unsigned i
= 0, j
= 0, k
;
11309 for (tree arg
= DECL_ARGUMENTS (node
->decl
);
11311 arg
= DECL_CHAIN (arg
), i
++, j
++)
11313 if (adjustments
[j
].op
== IPA_PARM_OP_COPY
)
11316 node
->simdclone
->args
[i
].vector_arg
= arg
;
11318 tree array
= node
->simdclone
->args
[i
].simd_array
;
11319 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg
)) == node
->simdclone
->simdlen
)
11321 tree ptype
= build_pointer_type (TREE_TYPE (TREE_TYPE (array
)));
11322 tree ptr
= build_fold_addr_expr (array
);
11323 tree t
= build2 (MEM_REF
, TREE_TYPE (arg
), ptr
,
11324 build_int_cst (ptype
, 0));
11325 t
= build2 (MODIFY_EXPR
, TREE_TYPE (t
), t
, arg
);
11326 gimplify_and_add (t
, &seq
);
11330 unsigned int simdlen
= TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg
));
11331 tree ptype
= build_pointer_type (TREE_TYPE (TREE_TYPE (array
)));
11332 for (k
= 0; k
< node
->simdclone
->simdlen
; k
+= simdlen
)
11334 tree ptr
= build_fold_addr_expr (array
);
11338 arg
= DECL_CHAIN (arg
);
11342 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg
))));
11343 tree t
= build2 (MEM_REF
, TREE_TYPE (arg
), ptr
,
11344 build_int_cst (ptype
, k
* elemsize
));
11345 t
= build2 (MODIFY_EXPR
, TREE_TYPE (t
), t
, arg
);
11346 gimplify_and_add (t
, &seq
);
11353 /* Callback info for ipa_simd_modify_stmt_ops below. */
11355 struct modify_stmt_info
{
11356 ipa_parm_adjustment_vec adjustments
;
11358 /* True if the parent statement was modified by
11359 ipa_simd_modify_stmt_ops. */
11363 /* Callback for walk_gimple_op.
11365 Adjust operands from a given statement as specified in the
11366 adjustments vector in the callback data. */
11369 ipa_simd_modify_stmt_ops (tree
*tp
, int *walk_subtrees
, void *data
)
11371 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
11372 struct modify_stmt_info
*info
= (struct modify_stmt_info
*) wi
->info
;
11373 tree
*orig_tp
= tp
;
11374 if (TREE_CODE (*tp
) == ADDR_EXPR
)
11375 tp
= &TREE_OPERAND (*tp
, 0);
11376 struct ipa_parm_adjustment
*cand
= NULL
;
11377 if (TREE_CODE (*tp
) == PARM_DECL
)
11378 cand
= ipa_get_adjustment_candidate (&tp
, NULL
, info
->adjustments
, true);
11382 *walk_subtrees
= 0;
11385 tree repl
= NULL_TREE
;
11387 repl
= unshare_expr (cand
->new_decl
);
11392 *walk_subtrees
= 0;
11393 bool modified
= info
->modified
;
11394 info
->modified
= false;
11395 walk_tree (tp
, ipa_simd_modify_stmt_ops
, wi
, wi
->pset
);
11396 if (!info
->modified
)
11398 info
->modified
= modified
;
11401 info
->modified
= modified
;
11410 repl
= build_fold_addr_expr (repl
);
11412 = gimple_build_assign (make_ssa_name (TREE_TYPE (repl
), NULL
), repl
);
11413 repl
= gimple_assign_lhs (stmt
);
11414 gimple_stmt_iterator gsi
= gsi_for_stmt (info
->stmt
);
11415 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
11418 else if (!useless_type_conversion_p (TREE_TYPE (*tp
), TREE_TYPE (repl
)))
11420 tree vce
= build1 (VIEW_CONVERT_EXPR
, TREE_TYPE (*tp
), repl
);
11426 info
->modified
= true;
11430 /* Traverse the function body and perform all modifications as
11431 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
11432 modified such that the replacement/reduction value will now be an
11433 offset into the corresponding simd_array.
11435 This function will replace all function argument uses with their
11436 corresponding simd array elements, and adjust the return values accordingly. */
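/* Concretely (illustrative only): a use of parameter `x' in the body is
   rewritten into an ARRAY_REF of the parameter's local simd array,
   e.g. x.simdarray[iter1], and

     return foo;

   becomes

     retval_array[iter] = foo;

   so each iteration of the generated simd loop reads and writes its own
   lane.  */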
11440 ipa_simd_modify_function_body (struct cgraph_node
*node
,
11441 ipa_parm_adjustment_vec adjustments
,
11442 tree retval_array
, tree iter
)
11445 unsigned int i
, j
, l
;
11447 /* Re-use the adjustments array, but this time use it to replace
11448 every function argument use to an offset into the corresponding
11450 for (i
= 0, j
= 0; i
< node
->simdclone
->nargs
; ++i
, ++j
)
11452 if (!node
->simdclone
->args
[i
].vector_arg
)
11455 tree basetype
= TREE_TYPE (node
->simdclone
->args
[i
].orig_arg
);
11456 tree vectype
= TREE_TYPE (node
->simdclone
->args
[i
].vector_arg
);
11457 adjustments
[j
].new_decl
11458 = build4 (ARRAY_REF
,
11460 node
->simdclone
->args
[i
].simd_array
,
11462 NULL_TREE
, NULL_TREE
);
11463 if (adjustments
[j
].op
== IPA_PARM_OP_NONE
11464 && TYPE_VECTOR_SUBPARTS (vectype
) < node
->simdclone
->simdlen
)
11465 j
+= node
->simdclone
->simdlen
/ TYPE_VECTOR_SUBPARTS (vectype
) - 1;
11468 l
= adjustments
.length ();
11469 for (i
= 1; i
< num_ssa_names
; i
++)
11471 tree name
= ssa_name (i
);
11473 && SSA_NAME_VAR (name
)
11474 && TREE_CODE (SSA_NAME_VAR (name
)) == PARM_DECL
)
11476 for (j
= 0; j
< l
; j
++)
11477 if (SSA_NAME_VAR (name
) == adjustments
[j
].base
11478 && adjustments
[j
].new_decl
)
11481 if (adjustments
[j
].new_ssa_base
== NULL_TREE
)
11484 = copy_var_decl (adjustments
[j
].base
,
11485 DECL_NAME (adjustments
[j
].base
),
11486 TREE_TYPE (adjustments
[j
].base
));
11487 adjustments
[j
].new_ssa_base
= base_var
;
11490 base_var
= adjustments
[j
].new_ssa_base
;
11491 if (SSA_NAME_IS_DEFAULT_DEF (name
))
11493 bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
11494 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
11495 tree new_decl
= unshare_expr (adjustments
[j
].new_decl
);
11496 set_ssa_default_def (cfun
, adjustments
[j
].base
, NULL_TREE
);
11497 SET_SSA_NAME_VAR_OR_IDENTIFIER (name
, base_var
);
11498 SSA_NAME_IS_DEFAULT_DEF (name
) = 0;
11499 gimple stmt
= gimple_build_assign (name
, new_decl
);
11500 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
11503 SET_SSA_NAME_VAR_OR_IDENTIFIER (name
, base_var
);
11508 struct modify_stmt_info info
;
11509 info
.adjustments
= adjustments
;
11511 FOR_EACH_BB_FN (bb
, DECL_STRUCT_FUNCTION (node
->decl
))
11513 gimple_stmt_iterator gsi
;
11515 gsi
= gsi_start_bb (bb
);
11516 while (!gsi_end_p (gsi
))
11518 gimple stmt
= gsi_stmt (gsi
);
11520 struct walk_stmt_info wi
;
11522 memset (&wi
, 0, sizeof (wi
));
11523 info
.modified
= false;
11525 walk_gimple_op (stmt
, ipa_simd_modify_stmt_ops
, &wi
);
11527 if (gimple_code (stmt
) == GIMPLE_RETURN
)
11529 tree retval
= gimple_return_retval (stmt
);
11532 gsi_remove (&gsi
, true);
11536 /* Replace `return foo' with `retval_array[iter] = foo'. */
11537 tree ref
= build4 (ARRAY_REF
, TREE_TYPE (retval
),
11538 retval_array
, iter
, NULL
, NULL
);
11539 stmt
= gimple_build_assign (ref
, retval
);
11540 gsi_replace (&gsi
, stmt
, true);
11541 info
.modified
= true;
11546 update_stmt (stmt
);
11547 if (maybe_clean_eh_stmt (stmt
))
11548 gimple_purge_dead_eh_edges (gimple_bb (stmt
));
11555 /* Adjust the argument types in NODE to their appropriate vector
11559 simd_clone_adjust (struct cgraph_node
*node
)
11561 push_cfun (DECL_STRUCT_FUNCTION (node
->decl
));
11563 targetm
.simd_clone
.adjust (node
);
11565 tree retval
= simd_clone_adjust_return_type (node
);
11566 ipa_parm_adjustment_vec adjustments
11567 = simd_clone_adjust_argument_types (node
);
11569 push_gimplify_context ();
11571 gimple_seq seq
= simd_clone_init_simd_arrays (node
, adjustments
);
11573 /* Adjust all uses of vector arguments accordingly. Adjust all
11574 return values accordingly. */
11575 tree iter
= create_tmp_var (unsigned_type_node
, "iter");
11576 tree iter1
= make_ssa_name (iter
, NULL
);
11577 tree iter2
= make_ssa_name (iter
, NULL
);
11578 ipa_simd_modify_function_body (node
, adjustments
, retval
, iter1
);
11580 /* Initialize the iteration variable. */
11581 basic_block entry_bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
11582 basic_block body_bb
= split_block_after_labels (entry_bb
)->dest
;
11583 gimple_stmt_iterator gsi
= gsi_after_labels (entry_bb
);
11584 /* Insert the SIMD array and iv initialization at function
11586 gsi_insert_seq_before (&gsi
, seq
, GSI_NEW_STMT
);
11588 pop_gimplify_context (NULL
);
11590 /* Create a new BB right before the original exit BB, to hold the
11591 iteration increment and the condition/branch. */
11592 basic_block orig_exit
= EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun
), 0)->src
;
11593 basic_block incr_bb
= create_empty_bb (orig_exit
);
11594 /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
11595 flag. Set it now to be a FALLTHRU_EDGE. */
11596 gcc_assert (EDGE_COUNT (orig_exit
->succs
) == 1);
11597 EDGE_SUCC (orig_exit
, 0)->flags
|= EDGE_FALLTHRU
;
11598 for (unsigned i
= 0;
11599 i
< EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
); ++i
)
11601 edge e
= EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun
), i
);
11602 redirect_edge_succ (e
, incr_bb
);
11604 edge e
= make_edge (incr_bb
, EXIT_BLOCK_PTR_FOR_FN (cfun
), 0);
11605 e
->probability
= REG_BR_PROB_BASE
;
11606 gsi
= gsi_last_bb (incr_bb
);
11607 gimple g
= gimple_build_assign_with_ops (PLUS_EXPR
, iter2
, iter1
,
11608 build_int_cst (unsigned_type_node
,
11610 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
11612 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
11613 struct loop
*loop
= alloc_loop ();
11614 cfun
->has_force_vectorize_loops
= true;
11615 loop
->safelen
= node
->simdclone
->simdlen
;
11616 loop
->force_vectorize
= true;
11617 loop
->header
= body_bb
;
11618 add_bb_to_loop (incr_bb
, loop
);
11620 /* Branch around the body if the mask applies. */
11621 if (node
->simdclone
->inbranch
)
11623 gimple_stmt_iterator gsi
= gsi_last_bb (loop
->header
);
11625 = node
->simdclone
->args
[node
->simdclone
->nargs
- 1].simd_array
;
11626 tree mask
= make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array
)), NULL
);
11627 tree aref
= build4 (ARRAY_REF
,
11628 TREE_TYPE (TREE_TYPE (mask_array
)),
11631 g
= gimple_build_assign (mask
, aref
);
11632 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
11633 int bitsize
= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref
)));
11634 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref
)))
11636 aref
= build1 (VIEW_CONVERT_EXPR
,
11637 build_nonstandard_integer_type (bitsize
, 0), mask
);
11638 mask
= make_ssa_name (TREE_TYPE (aref
), NULL
);
11639 g
= gimple_build_assign (mask
, aref
);
11640 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
11643 g
= gimple_build_cond (EQ_EXPR
, mask
, build_zero_cst (TREE_TYPE (mask
)),
11645 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
11646 make_edge (loop
->header
, incr_bb
, EDGE_TRUE_VALUE
);
11647 FALLTHRU_EDGE (loop
->header
)->flags
= EDGE_FALSE_VALUE
;
11650 /* Generate the condition. */
11651 g
= gimple_build_cond (LT_EXPR
,
11653 build_int_cst (unsigned_type_node
,
11654 node
->simdclone
->simdlen
),
11656 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
11657 e
= split_block (incr_bb
, gsi_stmt (gsi
));
11658 basic_block latch_bb
= e
->dest
;
11659 basic_block new_exit_bb
= e
->dest
;
11660 new_exit_bb
= split_block (latch_bb
, NULL
)->dest
;
11661 loop
->latch
= latch_bb
;
11663 redirect_edge_succ (FALLTHRU_EDGE (latch_bb
), body_bb
);
11665 make_edge (incr_bb
, new_exit_bb
, EDGE_FALSE_VALUE
);
11666 /* The successor of incr_bb is already pointing to latch_bb; just
11668 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
11669 FALLTHRU_EDGE (incr_bb
)->flags
= EDGE_TRUE_VALUE
;
11671 gimple phi
= create_phi_node (iter1
, body_bb
);
11672 edge preheader_edge
= find_edge (entry_bb
, body_bb
);
11673 edge latch_edge
= single_succ_edge (latch_bb
);
11674 add_phi_arg (phi
, build_zero_cst (unsigned_type_node
), preheader_edge
,
11676 add_phi_arg (phi
, iter2
, latch_edge
, UNKNOWN_LOCATION
);
11678 /* Generate the new return. */
11679 gsi
= gsi_last_bb (new_exit_bb
);
11681 && TREE_CODE (retval
) == VIEW_CONVERT_EXPR
11682 && TREE_CODE (TREE_OPERAND (retval
, 0)) == RESULT_DECL
)
11683 retval
= TREE_OPERAND (retval
, 0);
11686 retval
= build1 (VIEW_CONVERT_EXPR
,
11687 TREE_TYPE (TREE_TYPE (node
->decl
)),
11689 retval
= force_gimple_operand_gsi (&gsi
, retval
, true, NULL
,
11690 false, GSI_CONTINUE_LINKING
);
11692 g
= gimple_build_return (retval
);
11693 gsi_insert_after (&gsi
, g
, GSI_CONTINUE_LINKING
);
11695 /* Handle aligned clauses by replacing default defs of the aligned
11696 uniform args with __builtin_assume_aligned (arg_N(D), alignment)
11697 lhs. Handle linear by adding PHIs. */
11698 for (unsigned i
= 0; i
< node
->simdclone
->nargs
; i
++)
11699 if (node
->simdclone
->args
[i
].alignment
11700 && node
->simdclone
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
11701 && (node
->simdclone
->args
[i
].alignment
11702 & (node
->simdclone
->args
[i
].alignment
- 1)) == 0
11703 && TREE_CODE (TREE_TYPE (node
->simdclone
->args
[i
].orig_arg
))
11706 unsigned int alignment
= node
->simdclone
->args
[i
].alignment
;
11707 tree orig_arg
= node
->simdclone
->args
[i
].orig_arg
;
11708 tree def
= ssa_default_def (cfun
, orig_arg
);
11709 if (def
&& !has_zero_uses (def
))
11711 tree fn
= builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED
);
11712 gimple_seq seq
= NULL
;
11713 bool need_cvt
= false;
11715 = gimple_build_call (fn
, 2, def
, size_int (alignment
));
11717 if (!useless_type_conversion_p (TREE_TYPE (orig_arg
),
11720 tree t
= make_ssa_name (need_cvt
? ptr_type_node
: orig_arg
, NULL
);
11721 gimple_call_set_lhs (g
, t
);
11722 gimple_seq_add_stmt_without_update (&seq
, g
);
11725 t
= make_ssa_name (orig_arg
, NULL
);
11726 g
= gimple_build_assign_with_ops (NOP_EXPR
, t
,
11727 gimple_call_lhs (g
),
11729 gimple_seq_add_stmt_without_update (&seq
, g
);
11731 gsi_insert_seq_on_edge_immediate
11732 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun
)), seq
);
11734 entry_bb
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
11735 int freq
= compute_call_stmt_bb_frequency (current_function_decl
,
11737 cgraph_create_edge (node
, cgraph_get_create_node (fn
),
11738 call
, entry_bb
->count
, freq
);
11740 imm_use_iterator iter
;
11741 use_operand_p use_p
;
11743 tree repl
= gimple_get_lhs (g
);
11744 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, def
)
11745 if (is_gimple_debug (use_stmt
) || use_stmt
== call
)
11748 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
11749 SET_USE (use_p
, repl
);
11752 else if (node
->simdclone
->args
[i
].arg_type
11753 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
11755 tree orig_arg
= node
->simdclone
->args
[i
].orig_arg
;
11756 tree def
= ssa_default_def (cfun
, orig_arg
);
11757 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg
))
11758 || POINTER_TYPE_P (TREE_TYPE (orig_arg
)));
11759 if (def
&& !has_zero_uses (def
))
11761 iter1
= make_ssa_name (orig_arg
, NULL
);
11762 iter2
= make_ssa_name (orig_arg
, NULL
);
11763 phi
= create_phi_node (iter1
, body_bb
);
11764 add_phi_arg (phi
, def
, preheader_edge
, UNKNOWN_LOCATION
);
11765 add_phi_arg (phi
, iter2
, latch_edge
, UNKNOWN_LOCATION
);
11766 enum tree_code code
= INTEGRAL_TYPE_P (TREE_TYPE (orig_arg
))
11767 ? PLUS_EXPR
: POINTER_PLUS_EXPR
;
11768 tree addtype
= INTEGRAL_TYPE_P (TREE_TYPE (orig_arg
))
11769 ? TREE_TYPE (orig_arg
) : sizetype
;
11771 = build_int_cst (addtype
, node
->simdclone
->args
[i
].linear_step
);
11772 g
= gimple_build_assign_with_ops (code
, iter2
, iter1
, addcst
);
11773 gsi
= gsi_last_bb (incr_bb
);
11774 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
11776 imm_use_iterator iter
;
11777 use_operand_p use_p
;
11779 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, def
)
11780 if (use_stmt
== phi
)
11783 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
11784 SET_USE (use_p
, iter1
);
11788 calculate_dominance_info (CDI_DOMINATORS
);
11789 add_loop (loop
, loop
->header
->loop_father
);
11790 update_ssa (TODO_update_ssa
);
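/* At this point the clone body has, roughly, the shape (sketch only):

     iter1 = PHI <0 (entry), iter2 (latch)>;
     ... original body with arguments and return rewritten to simd arrays ...
     iter2 = iter1 + 1;
     if (iter2 < simdlen) goto body; else goto exit;
     return <vector or array-of-vectors retval>;

   and the loop is marked force_vectorize with safelen == simdlen so the
   vectorizer can turn it back into straight-line vector code.  */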
11795 /* If the function in NODE is tagged as an elemental SIMD function,
11796 create the appropriate SIMD clones. */
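/* For example (illustrative only): a definition carrying a plain
   "#pragma omp declare simd" with neither inbranch nor notinbranch ends
   up with, per target ISA variant, two clones -- one masked (inbranch)
   and one unmasked -- each mangled and registered on node->simd_clones
   below.  */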
11799 expand_simd_clones (struct cgraph_node
*node
)
11801 tree attr
= lookup_attribute ("omp declare simd",
11802 DECL_ATTRIBUTES (node
->decl
));
11803 if (attr
== NULL_TREE
11804 || node
->global
.inlined_to
11805 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node
->decl
)))
11809 #pragma omp declare simd
11811 in C, there we don't know the argument types at all. */
11812 if (!node
->definition
11813 && TYPE_ARG_TYPES (TREE_TYPE (node
->decl
)) == NULL_TREE
)
11818 /* Start with parsing the "omp declare simd" attribute(s). */
11819 bool inbranch_clause_specified
;
11820 struct cgraph_simd_clone
*clone_info
11821 = simd_clone_clauses_extract (node
, TREE_VALUE (attr
),
11822 &inbranch_clause_specified
);
11823 if (clone_info
== NULL
)
11826 int orig_simdlen
= clone_info
->simdlen
;
11827 tree base_type
= simd_clone_compute_base_data_type (node
, clone_info
);
11828 /* The target can return 0 (no simd clones should be created),
11829 1 (just one ISA of simd clones should be created) or higher
11830 count of ISA variants. In that case, clone_info is initialized
11831 for the first ISA variant. */
11833 = targetm
.simd_clone
.compute_vecsize_and_simdlen (node
, clone_info
,
11838 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
11839 also create one inbranch and one !inbranch clone of it. */
11840 for (int i
= 0; i
< count
* 2; i
++)
11842 struct cgraph_simd_clone
*clone
= clone_info
;
11843 if (inbranch_clause_specified
&& (i
& 1) != 0)
11848 clone
= simd_clone_struct_alloc (clone_info
->nargs
11850 simd_clone_struct_copy (clone
, clone_info
);
11851 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
11852 and simd_clone_adjust_argument_types did to the first
11854 clone
->nargs
-= clone_info
->inbranch
;
11855 clone
->simdlen
= orig_simdlen
;
11856 /* And call the target hook again to get the right ISA. */
11857 targetm
.simd_clone
.compute_vecsize_and_simdlen (node
, clone
,
11861 clone
->inbranch
= 1;
11864 /* simd_clone_mangle might fail if such a clone has been created
11866 tree id
= simd_clone_mangle (node
, clone
);
11867 if (id
== NULL_TREE
)
11870 /* Only when we are sure we want to create the clone actually
11871 clone the function (or definitions) or create another
11872 extern FUNCTION_DECL (for prototypes without definitions). */
11873 struct cgraph_node
*n
= simd_clone_create (node
);
11877 n
->simdclone
= clone
;
11878 clone
->origin
= node
;
11879 clone
->next_clone
= NULL
;
11880 if (node
->simd_clones
== NULL
)
11882 clone
->prev_clone
= n
;
11883 node
->simd_clones
= n
;
11887 clone
->prev_clone
= node
->simd_clones
->simdclone
->prev_clone
;
11888 clone
->prev_clone
->simdclone
->next_clone
= n
;
11889 node
->simd_clones
->simdclone
->prev_clone
= n
;
11891 change_decl_assembler_name (n
->decl
, id
);
11892 /* And finally adjust the return type, parameters and for
11893 definitions also function body. */
11894 if (node
->definition
)
11895 simd_clone_adjust (n
);
11898 simd_clone_adjust_return_type (n
);
11899 simd_clone_adjust_argument_types (n
);
11903 while ((attr
= lookup_attribute ("omp declare simd", TREE_CHAIN (attr
))));
11906 /* Entry point for IPA simd clone creation pass. */
11908 static unsigned int
11909 ipa_omp_simd_clone (void)
11911 struct cgraph_node *node;
11912 FOR_EACH_FUNCTION (node)
11913 expand_simd_clones (node);
11919 const pass_data pass_data_omp_simd_clone =
11921 SIMPLE_IPA_PASS, /* type */
11922 "simdclone", /* name */
11923 OPTGROUP_NONE, /* optinfo_flags */
11924 TV_NONE, /* tv_id */
11925 ( PROP_ssa | PROP_cfg ), /* properties_required */
11926 0, /* properties_provided */
11927 0, /* properties_destroyed */
11928 0, /* todo_flags_start */
11929 0, /* todo_flags_finish */
11932 class pass_omp_simd_clone : public simple_ipa_opt_pass
11935 pass_omp_simd_clone (gcc::context *ctxt)
11936 : simple_ipa_opt_pass (pass_data_omp_simd_clone, ctxt)
11939 /* opt_pass methods: */
11940 virtual bool gate (function *);
11941 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
11945 pass_omp_simd_clone::gate (function *)
11947 return ((flag_openmp || flag_openmp_simd
11949 || (in_lto_p && !flag_wpa))
11950 && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
11953 } // anon namespace
11955 simple_ipa_opt_pass *
11956 make_pass_omp_simd_clone (gcc::context *ctxt)
11958 return new pass_omp_simd_clone (ctxt);
11961 #include "gt-omp-low.h"