1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "tree-gimple.h"
31 #include "tree-inline.h"
32 #include "langhooks.h"
33 #include "diagnostic.h"
34 #include "tree-flow.h"
35 #include "timevar.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "tree-pass.h"
41 #include "ggc.h"
42 #include "except.h"
43 #include "splay-tree.h"
44 #include "optabs.h"
45 #include "cfgloop.h"
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
61 typedef struct omp_context
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 tree stmt;
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
80 /* A chain of variables to add to the top-level block surrounding the
81 construct. In the case of a parallel, this is in the child function. */
82 tree block_vars;
84 /* What to do with variables with implicitly determined sharing
85 attributes. */
86 enum omp_clause_default_kind default_kind;
88 /* Nesting depth of this context. Used to beautify error messages re
89 invalid gotos. The outermost ctx is depth 1, with depth 0 being
90 reserved for the main body of the function. */
91 int depth;
93 /* True if this parallel directive is nested within another. */
94 bool is_nested;
95 } omp_context;
98 /* A structure describing the main elements of a parallel loop. */
100 struct omp_for_data
102 tree v, n1, n2, step, chunk_size, for_stmt;
103 enum tree_code cond_code;
104 tree pre;
105 bool have_nowait, have_ordered;
106 enum omp_clause_schedule_kind sched_kind;
110 static splay_tree all_contexts;
111 static int parallel_nesting_level;
112 struct omp_region *root_omp_region;
114 static void scan_omp (tree *, omp_context *);
115 static void lower_omp (tree *, omp_context *);
116 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
117 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
119 /* Find an OpenMP clause of type KIND within CLAUSES. */
121 tree
122 find_omp_clause (tree clauses, enum tree_code kind)
124 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
125 if (OMP_CLAUSE_CODE (clauses) == kind)
126 return clauses;
128 return NULL_TREE;
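/* Illustrative usage only (not part of the original file): a caller that
   wants the schedule kind of a loop can do, roughly,

     tree c = find_omp_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_SCHEDULE);
     if (c)
       kind = OMP_CLAUSE_SCHEDULE_KIND (c);

   which is how determine_parallel_type below inspects the clause list.  */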
131 /* Return true if CTX is for an omp parallel. */
133 static inline bool
134 is_parallel_ctx (omp_context *ctx)
136 return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
140 /* Return true if REGION is a combined parallel+workshare region. */
142 static inline bool
143 is_combined_parallel (struct omp_region *region)
145 return region->is_combined_parallel;
149 /* Extract the header elements of parallel loop FOR_STMT and store
150 them into *FD. */
152 static void
153 extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
155 tree t, var;
157 fd->for_stmt = for_stmt;
158 fd->pre = NULL;
160 t = OMP_FOR_INIT (for_stmt);
161 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
162 fd->v = GIMPLE_STMT_OPERAND (t, 0);
163 gcc_assert (SSA_VAR_P (fd->v));
164 gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
165 var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
166 fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
168 t = OMP_FOR_COND (for_stmt);
169 fd->cond_code = TREE_CODE (t);
170 gcc_assert (TREE_OPERAND (t, 0) == var);
171 fd->n2 = TREE_OPERAND (t, 1);
172 switch (fd->cond_code)
174 case LT_EXPR:
175 case GT_EXPR:
176 break;
177 case LE_EXPR:
178 fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
179 build_int_cst (TREE_TYPE (fd->n2), 1));
180 fd->cond_code = LT_EXPR;
181 break;
182 case GE_EXPR:
183 fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
184 build_int_cst (TREE_TYPE (fd->n2), 1));
185 fd->cond_code = GT_EXPR;
186 break;
187 default:
188 gcc_unreachable ();
191 t = OMP_FOR_INCR (fd->for_stmt);
192 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
193 gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
194 t = GIMPLE_STMT_OPERAND (t, 1);
195 gcc_assert (TREE_OPERAND (t, 0) == var);
196 switch (TREE_CODE (t))
198 case PLUS_EXPR:
199 fd->step = TREE_OPERAND (t, 1);
200 break;
201 case MINUS_EXPR:
202 fd->step = TREE_OPERAND (t, 1);
203 fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
204 break;
205 default:
206 gcc_unreachable ();
209 fd->have_nowait = fd->have_ordered = false;
210 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
211 fd->chunk_size = NULL_TREE;
213 for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
214 switch (OMP_CLAUSE_CODE (t))
216 case OMP_CLAUSE_NOWAIT:
217 fd->have_nowait = true;
218 break;
219 case OMP_CLAUSE_ORDERED:
220 fd->have_ordered = true;
221 break;
222 case OMP_CLAUSE_SCHEDULE:
223 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
224 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
225 break;
226 default:
227 break;
230 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
231 gcc_assert (fd->chunk_size == NULL);
232 else if (fd->chunk_size == NULL)
234 /* We only need to compute a default chunk size for ordered
235 static loops and dynamic loops. */
236 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
237 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
238 ? integer_zero_node : integer_one_node;
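/* A schematic example (not from the original source): for

     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i <= n; i++) ...

   the fields of *FD come out roughly as v = i, n1 = 0, n2 = n + 1 (the
   LE_EXPR test is normalized to LT_EXPR by adding 1), step = 1,
   cond_code = LT_EXPR, sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC and
   chunk_size = 4.  */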
243 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
244 is the immediate dominator of PAR_ENTRY_BB, return true if there
245 are no data dependencies that would prevent expanding the parallel
246 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
248 When expanding a combined parallel+workshare region, the call to
249 the child function may need additional arguments in the case of
250 OMP_FOR regions. In some cases, these arguments are computed out
251 of variables passed in from the parent to the child via 'struct
252 .omp_data_s'. For instance:
254 #pragma omp parallel for schedule (guided, i * 4)
255 for (j ...)
257 Is lowered into:
259 # BLOCK 2 (PAR_ENTRY_BB)
260 .omp_data_o.i = i;
261 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
263 # BLOCK 3 (WS_ENTRY_BB)
264 .omp_data_i = &.omp_data_o;
265 D.1667 = .omp_data_i->i;
266 D.1598 = D.1667 * 4;
267 #pragma omp for schedule (guided, D.1598)
269 When we outline the parallel region, the call to the child function
270 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
271 that value is computed *after* the call site. So, in principle we
272 cannot do the transformation.
274 To see whether the code in WS_ENTRY_BB blocks the combined
275 parallel+workshare call, we collect all the variables used in the
276 OMP_FOR header and check whether they appear on the LHS of any
277 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
278 call.
280 FIXME. If we had the SSA form built at this point, we could merely
281 hoist the code in block 3 into block 2 and be done with it. But at
282 this point we don't have dataflow information and though we could
283 hack something up here, it is really not worth the aggravation. */
285 static bool
286 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
288 struct omp_for_data fd;
289 tree par_stmt, ws_stmt;
291 par_stmt = last_stmt (par_entry_bb);
292 ws_stmt = last_stmt (ws_entry_bb);
294 if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
295 return true;
297 gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);
299 extract_omp_for_data (ws_stmt, &fd);
301 /* FIXME. We give up too easily here. If any of these arguments
302 are not constants, they will likely involve variables that have
303 been mapped into fields of .omp_data_s for sharing with the child
304 function. With appropriate data flow, it would be possible to
305 see through this. */
306 if (!is_gimple_min_invariant (fd.n1)
307 || !is_gimple_min_invariant (fd.n2)
308 || !is_gimple_min_invariant (fd.step)
309 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
310 return false;
312 return true;
316 /* Collect additional arguments needed to emit a combined
317 parallel+workshare call. WS_STMT is the workshare directive being
318 expanded. */
320 static tree
321 get_ws_args_for (tree ws_stmt)
323 tree t;
325 if (TREE_CODE (ws_stmt) == OMP_FOR)
327 struct omp_for_data fd;
328 tree ws_args;
330 extract_omp_for_data (ws_stmt, &fd);
332 ws_args = NULL_TREE;
333 if (fd.chunk_size)
335 t = fold_convert (long_integer_type_node, fd.chunk_size);
336 ws_args = tree_cons (NULL, t, ws_args);
339 t = fold_convert (long_integer_type_node, fd.step);
340 ws_args = tree_cons (NULL, t, ws_args);
342 t = fold_convert (long_integer_type_node, fd.n2);
343 ws_args = tree_cons (NULL, t, ws_args);
345 t = fold_convert (long_integer_type_node, fd.n1);
346 ws_args = tree_cons (NULL, t, ws_args);
348 return ws_args;
350 else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
352 /* Number of sections is equal to the number of edges from the
353 OMP_SECTIONS_SWITCH statement, except for the one to the exit
354 of the sections region. */
355 basic_block bb = single_succ (bb_for_stmt (ws_stmt));
356 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
357 t = tree_cons (NULL, t, NULL);
358 return t;
361 gcc_unreachable ();
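/* For illustration (not in the original source): with the loop from the
   comment above, get_ws_args_for returns the list (n1, n2, step,
   chunk_size) -- built here in reverse with tree_cons -- which is the
   set of extra arguments a combined region passes on to the
   GOMP_parallel_loop_*_start entry points selected in
   expand_parallel_call.  */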
365 /* Discover whether REGION is a combined parallel+workshare region. */
367 static void
368 determine_parallel_type (struct omp_region *region)
370 basic_block par_entry_bb, par_exit_bb;
371 basic_block ws_entry_bb, ws_exit_bb;
373 if (region == NULL || region->inner == NULL
374 || region->exit == NULL || region->inner->exit == NULL
375 || region->inner->cont == NULL)
376 return;
378 /* We only support parallel+for and parallel+sections. */
379 if (region->type != OMP_PARALLEL
380 || (region->inner->type != OMP_FOR
381 && region->inner->type != OMP_SECTIONS))
382 return;
384 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
385 WS_EXIT_BB -> PAR_EXIT_BB. */
386 par_entry_bb = region->entry;
387 par_exit_bb = region->exit;
388 ws_entry_bb = region->inner->entry;
389 ws_exit_bb = region->inner->exit;
391 if (single_succ (par_entry_bb) == ws_entry_bb
392 && single_succ (ws_exit_bb) == par_exit_bb
393 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
394 && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
395 || (last_and_only_stmt (ws_entry_bb)
396 && last_and_only_stmt (par_exit_bb))))
398 tree ws_stmt = last_stmt (ws_entry_bb);
400 if (region->inner->type == OMP_FOR)
402 /* If this is a combined parallel loop, we need to determine
403 whether or not to use the combined library calls. There
404 are two cases where we do not apply the transformation:
405 static loops and any kind of ordered loop. In the first
406 case, we already open code the loop so there is no need
407 to do anything else. In the latter case, the combined
408 parallel loop call would still need extra synchronization
409 to implement ordered semantics, so there would not be any
410 gain in using the combined call. */
411 tree clauses = OMP_FOR_CLAUSES (ws_stmt);
412 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
413 if (c == NULL
414 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
415 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
417 region->is_combined_parallel = false;
418 region->inner->is_combined_parallel = false;
419 return;
423 region->is_combined_parallel = true;
424 region->inner->is_combined_parallel = true;
425 region->ws_args = get_ws_args_for (ws_stmt);
430 /* Return true if EXPR is variable sized. */
432 static inline bool
433 is_variable_sized (const_tree expr)
435 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
438 /* Return true if DECL is a reference type. */
440 static inline bool
441 is_reference (tree decl)
443 return lang_hooks.decls.omp_privatize_by_reference (decl);
446 /* Lookup variables in the decl or field splay trees. The "maybe" form
447 allows the variable not to have been entered; otherwise we assert
448 that it has been. */
450 static inline tree
451 lookup_decl (tree var, omp_context *ctx)
453 tree *n;
454 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
455 return *n;
458 static inline tree
459 maybe_lookup_decl (tree var, omp_context *ctx)
461 tree *n;
462 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
463 return n ? *n : NULL_TREE;
466 static inline tree
467 lookup_field (tree var, omp_context *ctx)
469 splay_tree_node n;
470 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
471 return (tree) n->value;
474 static inline tree
475 maybe_lookup_field (tree var, omp_context *ctx)
477 splay_tree_node n;
478 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
479 return n ? (tree) n->value : NULL_TREE;
482 /* Return true if DECL should be copied by pointer. SHARED_P is true
483 if DECL is to be shared. */
485 static bool
486 use_pointer_for_field (const_tree decl, bool shared_p)
488 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
489 return true;
491 /* We can only use copy-in/copy-out semantics for shared variables
492 when we know the value is not accessible from an outer scope. */
493 if (shared_p)
495 /* ??? Trivially accessible from anywhere. But why would we even
496 be passing an address in this case? Should we simply assert
497 this to be false, or should we have a cleanup pass that removes
498 these from the list of mappings? */
499 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
500 return true;
502 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
503 without analyzing the expression whether or not its location
504 is accessible to anyone else. In the case of nested parallel
505 regions it certainly may be. */
506 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
507 return true;
509 /* Do not use copy-in/copy-out for variables that have their
510 address taken. */
511 if (TREE_ADDRESSABLE (decl))
512 return true;
515 return false;
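/* Rough examples (illustrative only): an automatic "int i" named in a
   shared clause can use copy-in/copy-out, so this returns false; the
   same variable with its address taken, a static or extern variable,
   or any aggregate is instead accessed through a pointer and returns
   true.  */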
518 /* Create a new VAR_DECL and copy information from VAR to it. */
520 tree
521 copy_var_decl (tree var, tree name, tree type)
523 tree copy = build_decl (VAR_DECL, name, type);
525 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
526 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
527 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
528 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
529 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
530 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
531 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
532 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
533 TREE_USED (copy) = 1;
534 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
536 return copy;
539 /* Construct a new automatic decl similar to VAR. */
541 static tree
542 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
544 tree copy = copy_var_decl (var, name, type);
546 DECL_CONTEXT (copy) = current_function_decl;
547 TREE_CHAIN (copy) = ctx->block_vars;
548 ctx->block_vars = copy;
550 return copy;
553 static tree
554 omp_copy_decl_1 (tree var, omp_context *ctx)
556 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
559 /* Build tree nodes to access the field for VAR on the receiver side. */
561 static tree
562 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
564 tree x, field = lookup_field (var, ctx);
566 /* If the receiver record type was remapped in the child function,
567 remap the field into the new record type. */
568 x = maybe_lookup_field (field, ctx);
569 if (x != NULL)
570 field = x;
572 x = build_fold_indirect_ref (ctx->receiver_decl);
573 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
574 if (by_ref)
575 x = build_fold_indirect_ref (x);
577 return x;
580 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
581 of a parallel, this is a component reference; for workshare constructs
582 this is some variable. */
584 static tree
585 build_outer_var_ref (tree var, omp_context *ctx)
587 tree x;
589 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
590 x = var;
591 else if (is_variable_sized (var))
593 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
594 x = build_outer_var_ref (x, ctx);
595 x = build_fold_indirect_ref (x);
597 else if (is_parallel_ctx (ctx))
599 bool by_ref = use_pointer_for_field (var, false);
600 x = build_receiver_ref (var, by_ref, ctx);
602 else if (ctx->outer)
603 x = lookup_decl (var, ctx->outer);
604 else if (is_reference (var))
605 /* This can happen with orphaned constructs. If var is a reference, it
606 may be shared and as such valid. */
607 x = var;
608 else
609 gcc_unreachable ();
611 if (is_reference (var))
612 x = build_fold_indirect_ref (x);
614 return x;
617 /* Build tree nodes to access the field for VAR on the sender side. */
619 static tree
620 build_sender_ref (tree var, omp_context *ctx)
622 tree field = lookup_field (var, ctx);
623 return build3 (COMPONENT_REF, TREE_TYPE (field),
624 ctx->sender_decl, field, NULL);
627 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
629 static void
630 install_var_field (tree var, bool by_ref, omp_context *ctx)
632 tree field, type;
634 gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
636 type = TREE_TYPE (var);
637 if (by_ref)
638 type = build_pointer_type (type);
640 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
642 /* Remember what variable this field was created for. This does have a
643 side effect of making dwarf2out ignore this member, so for helpful
644 debugging we clear it later in delete_omp_context. */
645 DECL_ABSTRACT_ORIGIN (field) = var;
647 insert_field_into_struct (ctx->record_type, field);
649 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
650 (splay_tree_value) field);
653 static tree
654 install_var_local (tree var, omp_context *ctx)
656 tree new_var = omp_copy_decl_1 (var, ctx);
657 insert_decl_map (&ctx->cb, var, new_var);
658 return new_var;
661 /* Adjust the replacement for DECL in CTX for the new context. This means
662 copying the DECL_VALUE_EXPR, and fixing up the type. */
664 static void
665 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
667 tree new_decl, size;
669 new_decl = lookup_decl (decl, ctx);
671 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
673 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
674 && DECL_HAS_VALUE_EXPR_P (decl))
676 tree ve = DECL_VALUE_EXPR (decl);
677 walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
678 SET_DECL_VALUE_EXPR (new_decl, ve);
679 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
682 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
684 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
685 if (size == error_mark_node)
686 size = TYPE_SIZE (TREE_TYPE (new_decl));
687 DECL_SIZE (new_decl) = size;
689 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
690 if (size == error_mark_node)
691 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
692 DECL_SIZE_UNIT (new_decl) = size;
696 /* The callback for remap_decl. Search all containing contexts for a
697 mapping of the variable; this avoids having to duplicate the splay
698 tree ahead of time. We know a mapping doesn't already exist in the
699 given context. Create new mappings to implement default semantics. */
701 static tree
702 omp_copy_decl (tree var, copy_body_data *cb)
704 omp_context *ctx = (omp_context *) cb;
705 tree new_var;
707 if (TREE_CODE (var) == LABEL_DECL)
709 new_var = create_artificial_label ();
710 DECL_CONTEXT (new_var) = current_function_decl;
711 insert_decl_map (&ctx->cb, var, new_var);
712 return new_var;
715 while (!is_parallel_ctx (ctx))
717 ctx = ctx->outer;
718 if (ctx == NULL)
719 return var;
720 new_var = maybe_lookup_decl (var, ctx);
721 if (new_var)
722 return new_var;
725 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
726 return var;
728 return error_mark_node;
732 /* Return the parallel region associated with STMT. */
734 /* Debugging dumps for parallel regions. */
735 void dump_omp_region (FILE *, struct omp_region *, int);
736 void debug_omp_region (struct omp_region *);
737 void debug_all_omp_regions (void);
739 /* Dump the parallel region tree rooted at REGION. */
741 void
742 dump_omp_region (FILE *file, struct omp_region *region, int indent)
744 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
745 tree_code_name[region->type]);
747 if (region->inner)
748 dump_omp_region (file, region->inner, indent + 4);
750 if (region->cont)
752 fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
753 region->cont->index);
756 if (region->exit)
757 fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
758 region->exit->index);
759 else
760 fprintf (file, "%*s[no exit marker]\n", indent, "");
762 if (region->next)
763 dump_omp_region (file, region->next, indent);
766 void
767 debug_omp_region (struct omp_region *region)
769 dump_omp_region (stderr, region, 0);
772 void
773 debug_all_omp_regions (void)
775 dump_omp_region (stderr, root_omp_region, 0);
779 /* Create a new parallel region starting at STMT inside region PARENT. */
781 struct omp_region *
782 new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
784 struct omp_region *region = xcalloc (1, sizeof (*region));
786 region->outer = parent;
787 region->entry = bb;
788 region->type = type;
790 if (parent)
792 /* This is a nested region. Add it to the list of inner
793 regions in PARENT. */
794 region->next = parent->inner;
795 parent->inner = region;
797 else
799 /* This is a toplevel region. Add it to the list of toplevel
800 regions in ROOT_OMP_REGION. */
801 region->next = root_omp_region;
802 root_omp_region = region;
805 return region;
808 /* Release the memory associated with the region tree rooted at REGION. */
810 static void
811 free_omp_region_1 (struct omp_region *region)
813 struct omp_region *i, *n;
815 for (i = region->inner; i ; i = n)
817 n = i->next;
818 free_omp_region_1 (i);
821 free (region);
824 /* Release the memory for the entire omp region tree. */
826 void
827 free_omp_regions (void)
829 struct omp_region *r, *n;
830 for (r = root_omp_region; r ; r = n)
832 n = r->next;
833 free_omp_region_1 (r);
835 root_omp_region = NULL;
839 /* Create a new context, with OUTER_CTX being the surrounding context. */
841 static omp_context *
842 new_omp_context (tree stmt, omp_context *outer_ctx)
844 omp_context *ctx = XCNEW (omp_context);
846 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
847 (splay_tree_value) ctx);
848 ctx->stmt = stmt;
850 if (outer_ctx)
852 ctx->outer = outer_ctx;
853 ctx->cb = outer_ctx->cb;
854 ctx->cb.block = NULL;
855 ctx->depth = outer_ctx->depth + 1;
857 else
859 ctx->cb.src_fn = current_function_decl;
860 ctx->cb.dst_fn = current_function_decl;
861 ctx->cb.src_node = cgraph_node (current_function_decl);
862 ctx->cb.dst_node = ctx->cb.src_node;
863 ctx->cb.src_cfun = cfun;
864 ctx->cb.copy_decl = omp_copy_decl;
865 ctx->cb.eh_region = -1;
866 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
867 ctx->depth = 1;
870 ctx->cb.decl_map = pointer_map_create ();
872 return ctx;
875 /* Destroy an omp_context data structure. Called through the splay tree
876 value delete callback. */
878 static void
879 delete_omp_context (splay_tree_value value)
881 omp_context *ctx = (omp_context *) value;
883 pointer_map_destroy (ctx->cb.decl_map);
885 if (ctx->field_map)
886 splay_tree_delete (ctx->field_map);
888 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it now,
889 otherwise it produces corrupt debug information. */
890 if (ctx->record_type)
892 tree t;
893 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
894 DECL_ABSTRACT_ORIGIN (t) = NULL;
897 XDELETE (ctx);
900 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
901 context. */
903 static void
904 fixup_child_record_type (omp_context *ctx)
906 tree f, type = ctx->record_type;
908 /* ??? It isn't sufficient to just call remap_type here, because
909 variably_modified_type_p doesn't work the way we expect for
910 record types. Testing each field for whether it needs remapping
911 and creating a new record by hand works, however. */
912 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
913 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
914 break;
915 if (f)
917 tree name, new_fields = NULL;
919 type = lang_hooks.types.make_type (RECORD_TYPE);
920 name = DECL_NAME (TYPE_NAME (ctx->record_type));
921 name = build_decl (TYPE_DECL, name, type);
922 TYPE_NAME (type) = name;
924 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
926 tree new_f = copy_node (f);
927 DECL_CONTEXT (new_f) = type;
928 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
929 TREE_CHAIN (new_f) = new_fields;
930 new_fields = new_f;
932 /* Arrange to be able to look up the receiver field
933 given the sender field. */
934 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
935 (splay_tree_value) new_f);
937 TYPE_FIELDS (type) = nreverse (new_fields);
938 layout_type (type);
941 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
944 /* Instantiate decls as necessary in CTX to satisfy the data sharing
945 specified by CLAUSES. */
947 static void
948 scan_sharing_clauses (tree clauses, omp_context *ctx)
950 tree c, decl;
951 bool scan_array_reductions = false;
953 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
955 bool by_ref;
957 switch (OMP_CLAUSE_CODE (c))
959 case OMP_CLAUSE_PRIVATE:
960 decl = OMP_CLAUSE_DECL (c);
961 if (!is_variable_sized (decl))
962 install_var_local (decl, ctx);
963 break;
965 case OMP_CLAUSE_SHARED:
966 gcc_assert (is_parallel_ctx (ctx));
967 decl = OMP_CLAUSE_DECL (c);
968 gcc_assert (!is_variable_sized (decl));
969 by_ref = use_pointer_for_field (decl, true);
970 /* Global variables don't need to be copied,
971 the receiver side will use them directly. */
972 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
973 break;
974 if (! TREE_READONLY (decl)
975 || TREE_ADDRESSABLE (decl)
976 || by_ref
977 || is_reference (decl))
979 install_var_field (decl, by_ref, ctx);
980 install_var_local (decl, ctx);
981 break;
983 /* We don't need to copy const scalar vars back. */
984 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
985 goto do_private;
987 case OMP_CLAUSE_LASTPRIVATE:
988 /* Let the corresponding firstprivate clause create
989 the variable. */
990 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
991 break;
992 /* FALLTHRU */
994 case OMP_CLAUSE_FIRSTPRIVATE:
995 case OMP_CLAUSE_REDUCTION:
996 decl = OMP_CLAUSE_DECL (c);
997 do_private:
998 if (is_variable_sized (decl))
999 break;
1000 else if (is_parallel_ctx (ctx)
1001 && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
1002 ctx)))
1004 by_ref = use_pointer_for_field (decl, false);
1005 install_var_field (decl, by_ref, ctx);
1007 install_var_local (decl, ctx);
1008 break;
1010 case OMP_CLAUSE_COPYPRIVATE:
1011 if (ctx->outer)
1012 scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
1013 /* FALLTHRU */
1015 case OMP_CLAUSE_COPYIN:
1016 decl = OMP_CLAUSE_DECL (c);
1017 by_ref = use_pointer_for_field (decl, false);
1018 install_var_field (decl, by_ref, ctx);
1019 break;
1021 case OMP_CLAUSE_DEFAULT:
1022 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1023 break;
1025 case OMP_CLAUSE_IF:
1026 case OMP_CLAUSE_NUM_THREADS:
1027 case OMP_CLAUSE_SCHEDULE:
1028 if (ctx->outer)
1029 scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1030 break;
1032 case OMP_CLAUSE_NOWAIT:
1033 case OMP_CLAUSE_ORDERED:
1034 break;
1036 default:
1037 gcc_unreachable ();
1041 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1043 switch (OMP_CLAUSE_CODE (c))
1045 case OMP_CLAUSE_LASTPRIVATE:
1046 /* Let the corresponding firstprivate clause create
1047 the variable. */
1048 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1049 break;
1050 /* FALLTHRU */
1052 case OMP_CLAUSE_PRIVATE:
1053 case OMP_CLAUSE_FIRSTPRIVATE:
1054 case OMP_CLAUSE_REDUCTION:
1055 decl = OMP_CLAUSE_DECL (c);
1056 if (is_variable_sized (decl))
1057 install_var_local (decl, ctx);
1058 fixup_remapped_decl (decl, ctx,
1059 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1060 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1061 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1062 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1063 scan_array_reductions = true;
1064 break;
1066 case OMP_CLAUSE_SHARED:
1067 decl = OMP_CLAUSE_DECL (c);
1068 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1069 fixup_remapped_decl (decl, ctx, false);
1070 break;
1072 case OMP_CLAUSE_COPYPRIVATE:
1073 case OMP_CLAUSE_COPYIN:
1074 case OMP_CLAUSE_DEFAULT:
1075 case OMP_CLAUSE_IF:
1076 case OMP_CLAUSE_NUM_THREADS:
1077 case OMP_CLAUSE_SCHEDULE:
1078 case OMP_CLAUSE_NOWAIT:
1079 case OMP_CLAUSE_ORDERED:
1080 break;
1082 default:
1083 gcc_unreachable ();
1087 if (scan_array_reductions)
1088 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1089 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1090 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1092 scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
1093 scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
1097 /* Create a new name for the omp child function. Returns an identifier. */
1099 static GTY(()) unsigned int tmp_ompfn_id_num;
1101 static tree
1102 create_omp_child_function_name (void)
1104 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1105 size_t len = IDENTIFIER_LENGTH (name);
1106 char *tmp_name, *prefix;
1108 prefix = alloca (len + sizeof ("_omp_fn"));
1109 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1110 strcpy (prefix + len, "_omp_fn");
1111 #ifndef NO_DOT_IN_LABEL
1112 prefix[len] = '.';
1113 #elif !defined NO_DOLLAR_IN_LABEL
1114 prefix[len] = '$';
1115 #endif
1116 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1117 return get_identifier (tmp_name);
1120 /* Build a decl for the omp child function. It'll not contain a body
1121 yet, just the bare decl. */
1123 static void
1124 create_omp_child_function (omp_context *ctx)
1126 tree decl, type, name, t;
1128 name = create_omp_child_function_name ();
1129 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1131 decl = build_decl (FUNCTION_DECL, name, type);
1132 decl = lang_hooks.decls.pushdecl (decl);
1134 ctx->cb.dst_fn = decl;
1136 TREE_STATIC (decl) = 1;
1137 TREE_USED (decl) = 1;
1138 DECL_ARTIFICIAL (decl) = 1;
1139 DECL_IGNORED_P (decl) = 0;
1140 TREE_PUBLIC (decl) = 0;
1141 DECL_UNINLINABLE (decl) = 1;
1142 DECL_EXTERNAL (decl) = 0;
1143 DECL_CONTEXT (decl) = NULL_TREE;
1144 DECL_INITIAL (decl) = make_node (BLOCK);
1146 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1147 DECL_ARTIFICIAL (t) = 1;
1148 DECL_IGNORED_P (t) = 1;
1149 DECL_RESULT (decl) = t;
1151 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1152 DECL_ARTIFICIAL (t) = 1;
1153 DECL_ARG_TYPE (t) = ptr_type_node;
1154 DECL_CONTEXT (t) = current_function_decl;
1155 TREE_USED (t) = 1;
1156 DECL_ARGUMENTS (decl) = t;
1157 ctx->receiver_decl = t;
1159 /* Allocate memory for the function structure. The call to
1160 allocate_struct_function clobbers CFUN, so we need to restore
1161 it afterward. */
1162 push_struct_function (decl);
1163 DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
1164 cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
1165 pop_cfun ();
1169 /* Scan an OpenMP parallel directive. */
1171 static void
1172 scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
1174 omp_context *ctx;
1175 tree name;
1177 /* Ignore parallel directives with empty bodies, unless there
1178 are copyin clauses. */
1179 if (optimize > 0
1180 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
1181 && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
1183 *stmt_p = build_empty_stmt ();
1184 return;
1187 ctx = new_omp_context (*stmt_p, outer_ctx);
1188 if (parallel_nesting_level > 1)
1189 ctx->is_nested = true;
1190 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1191 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1192 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1193 name = create_tmp_var_name (".omp_data_s");
1194 name = build_decl (TYPE_DECL, name, ctx->record_type);
1195 TYPE_NAME (ctx->record_type) = name;
1196 create_omp_child_function (ctx);
1197 OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;
1199 scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
1200 scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);
1202 if (TYPE_FIELDS (ctx->record_type) == NULL)
1203 ctx->record_type = ctx->receiver_decl = NULL;
1204 else
1206 layout_type (ctx->record_type);
1207 fixup_child_record_type (ctx);
1212 /* Scan an OpenMP loop directive. */
1214 static void
1215 scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
1217 omp_context *ctx;
1218 tree stmt;
1220 stmt = *stmt_p;
1221 ctx = new_omp_context (stmt, outer_ctx);
1223 scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);
1225 scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
1226 scan_omp (&OMP_FOR_INIT (stmt), ctx);
1227 scan_omp (&OMP_FOR_COND (stmt), ctx);
1228 scan_omp (&OMP_FOR_INCR (stmt), ctx);
1229 scan_omp (&OMP_FOR_BODY (stmt), ctx);
1232 /* Scan an OpenMP sections directive. */
1234 static void
1235 scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
1237 tree stmt;
1238 omp_context *ctx;
1240 stmt = *stmt_p;
1241 ctx = new_omp_context (stmt, outer_ctx);
1242 scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
1243 scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
1246 /* Scan an OpenMP single directive. */
1248 static void
1249 scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
1251 tree stmt = *stmt_p;
1252 omp_context *ctx;
1253 tree name;
1255 ctx = new_omp_context (stmt, outer_ctx);
1256 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1257 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1258 name = create_tmp_var_name (".omp_copy_s");
1259 name = build_decl (TYPE_DECL, name, ctx->record_type);
1260 TYPE_NAME (ctx->record_type) = name;
1262 scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
1263 scan_omp (&OMP_SINGLE_BODY (stmt), ctx);
1265 if (TYPE_FIELDS (ctx->record_type) == NULL)
1266 ctx->record_type = NULL;
1267 else
1268 layout_type (ctx->record_type);
1272 /* Check OpenMP nesting restrictions. */
1273 static void
1274 check_omp_nesting_restrictions (tree t, omp_context *ctx)
1276 switch (TREE_CODE (t))
1278 case OMP_FOR:
1279 case OMP_SECTIONS:
1280 case OMP_SINGLE:
1281 for (; ctx != NULL; ctx = ctx->outer)
1282 switch (TREE_CODE (ctx->stmt))
1284 case OMP_FOR:
1285 case OMP_SECTIONS:
1286 case OMP_SINGLE:
1287 case OMP_ORDERED:
1288 case OMP_MASTER:
1289 warning (0, "work-sharing region may not be closely nested inside "
1290 "of work-sharing, critical, ordered or master region");
1291 return;
1292 case OMP_PARALLEL:
1293 return;
1294 default:
1295 break;
1297 break;
1298 case OMP_MASTER:
1299 for (; ctx != NULL; ctx = ctx->outer)
1300 switch (TREE_CODE (ctx->stmt))
1302 case OMP_FOR:
1303 case OMP_SECTIONS:
1304 case OMP_SINGLE:
1305 warning (0, "master region may not be closely nested inside "
1306 "of work-sharing region");
1307 return;
1308 case OMP_PARALLEL:
1309 return;
1310 default:
1311 break;
1313 break;
1314 case OMP_ORDERED:
1315 for (; ctx != NULL; ctx = ctx->outer)
1316 switch (TREE_CODE (ctx->stmt))
1318 case OMP_CRITICAL:
1319 warning (0, "ordered region may not be closely nested inside "
1320 "of critical region");
1321 return;
1322 case OMP_FOR:
1323 if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
1324 OMP_CLAUSE_ORDERED) == NULL)
1325 warning (0, "ordered region must be closely nested inside "
1326 "a loop region with an ordered clause");
1327 return;
1328 case OMP_PARALLEL:
1329 return;
1330 default:
1331 break;
1333 break;
1334 case OMP_CRITICAL:
1335 for (; ctx != NULL; ctx = ctx->outer)
1336 if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
1337 && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
1339 warning (0, "critical region may not be nested inside a critical "
1340 "region with the same name");
1341 return;
1343 break;
1344 default:
1345 break;
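/* An illustrative violation (not from the original source):

     #pragma omp single
     {
       #pragma omp for          <-- work-sharing closely nested in single
       for (i = 0; i < n; i++) ...
     }

   Here the OMP_FOR case walks the context chain, meets OMP_SINGLE
   before any OMP_PARALLEL, and emits the "work-sharing region may not
   be closely nested" warning above.  */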
1350 /* Callback for walk_stmts used to scan for OpenMP directives at TP. */
1352 static tree
1353 scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
1355 struct walk_stmt_info *wi = data;
1356 omp_context *ctx = wi->info;
1357 tree t = *tp;
1359 if (EXPR_HAS_LOCATION (t))
1360 input_location = EXPR_LOCATION (t);
1362 /* Check the OpenMP nesting restrictions. */
1363 if (OMP_DIRECTIVE_P (t) && ctx != NULL)
1364 check_omp_nesting_restrictions (t, ctx);
1366 *walk_subtrees = 0;
1367 switch (TREE_CODE (t))
1369 case OMP_PARALLEL:
1370 parallel_nesting_level++;
1371 scan_omp_parallel (tp, ctx);
1372 parallel_nesting_level--;
1373 break;
1375 case OMP_FOR:
1376 scan_omp_for (tp, ctx);
1377 break;
1379 case OMP_SECTIONS:
1380 scan_omp_sections (tp, ctx);
1381 break;
1383 case OMP_SINGLE:
1384 scan_omp_single (tp, ctx);
1385 break;
1387 case OMP_SECTION:
1388 case OMP_MASTER:
1389 case OMP_ORDERED:
1390 case OMP_CRITICAL:
1391 ctx = new_omp_context (*tp, ctx);
1392 scan_omp (&OMP_BODY (*tp), ctx);
1393 break;
1395 case BIND_EXPR:
1397 tree var;
1398 *walk_subtrees = 1;
1400 for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
1401 insert_decl_map (&ctx->cb, var, var);
1403 break;
1405 case VAR_DECL:
1406 case PARM_DECL:
1407 case LABEL_DECL:
1408 case RESULT_DECL:
1409 if (ctx)
1410 *tp = remap_decl (t, &ctx->cb);
1411 break;
1413 default:
1414 if (ctx && TYPE_P (t))
1415 *tp = remap_type (t, &ctx->cb);
1416 else if (!DECL_P (t))
1417 *walk_subtrees = 1;
1418 break;
1421 return NULL_TREE;
1425 /* Scan all the statements starting at STMT_P. CTX contains context
1426 information about the OpenMP directives and clauses found during
1427 the scan. */
1429 static void
1430 scan_omp (tree *stmt_p, omp_context *ctx)
1432 location_t saved_location;
1433 struct walk_stmt_info wi;
1435 memset (&wi, 0, sizeof (wi));
1436 wi.callback = scan_omp_1;
1437 wi.info = ctx;
1438 wi.want_bind_expr = (ctx != NULL);
1439 wi.want_locations = true;
1441 saved_location = input_location;
1442 walk_stmts (&wi, stmt_p);
1443 input_location = saved_location;
1446 /* Re-gimplification and code generation routines. */
1448 /* Build a call to GOMP_barrier. */
1450 static tree
1451 build_omp_barrier (void)
1453 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
1456 /* If a context was created for STMT when it was scanned, return it. */
1458 static omp_context *
1459 maybe_lookup_ctx (tree stmt)
1461 splay_tree_node n;
1462 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
1463 return n ? (omp_context *) n->value : NULL;
1467 /* Find the mapping for DECL in CTX or the immediately enclosing
1468 context that has a mapping for DECL.
1470 If CTX is a nested parallel directive, we may have to use the decl
1471 mappings created in CTX's parent context. Suppose that we have the
1472 following parallel nesting (variable UIDs showed for clarity):
1474 iD.1562 = 0;
1475 #omp parallel shared(iD.1562) -> outer parallel
1476 iD.1562 = iD.1562 + 1;
1478 #omp parallel shared (iD.1562) -> inner parallel
1479 iD.1562 = iD.1562 - 1;
1481 Each parallel structure will create a distinct .omp_data_s structure
1482 for copying iD.1562 in/out of the directive:
1484 outer parallel .omp_data_s.1.i -> iD.1562
1485 inner parallel .omp_data_s.2.i -> iD.1562
1487 A shared variable mapping will produce a copy-out operation before
1488 the parallel directive and a copy-in operation after it. So, in
1489 this case we would have:
1491 iD.1562 = 0;
1492 .omp_data_o.1.i = iD.1562;
1493 #omp parallel shared(iD.1562) -> outer parallel
1494 .omp_data_i.1 = &.omp_data_o.1
1495 .omp_data_i.1->i = .omp_data_i.1->i + 1;
1497 .omp_data_o.2.i = iD.1562; -> **
1498 #omp parallel shared(iD.1562) -> inner parallel
1499 .omp_data_i.2 = &.omp_data_o.2
1500 .omp_data_i.2->i = .omp_data_i.2->i - 1;
1503 ** This is a problem. The symbol iD.1562 cannot be referenced
1504 inside the body of the outer parallel region. But since we are
1505 emitting this copy operation while expanding the inner parallel
1506 directive, we need to access the CTX structure of the outer
1507 parallel directive to get the correct mapping:
1509 .omp_data_o.2.i = .omp_data_i.1->i
1511 Since there may be other workshare or parallel directives enclosing
1512 the parallel directive, it may be necessary to walk up the context
1513 parent chain. This is not a problem in general because nested
1514 parallelism happens only rarely. */
1516 static tree
1517 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1519 tree t;
1520 omp_context *up;
1522 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1523 t = maybe_lookup_decl (decl, up);
1525 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
1527 return t ? t : decl;
1531 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
1532 in outer contexts. */
1534 static tree
1535 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1537 tree t = NULL;
1538 omp_context *up;
1540 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1541 t = maybe_lookup_decl (decl, up);
1543 return t ? t : decl;
1547 /* Construct the initialization value for reduction CLAUSE. */
1549 tree
1550 omp_reduction_init (tree clause, tree type)
1552 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
1554 case PLUS_EXPR:
1555 case MINUS_EXPR:
1556 case BIT_IOR_EXPR:
1557 case BIT_XOR_EXPR:
1558 case TRUTH_OR_EXPR:
1559 case TRUTH_ORIF_EXPR:
1560 case TRUTH_XOR_EXPR:
1561 case NE_EXPR:
1562 return fold_convert (type, integer_zero_node);
1564 case MULT_EXPR:
1565 case TRUTH_AND_EXPR:
1566 case TRUTH_ANDIF_EXPR:
1567 case EQ_EXPR:
1568 return fold_convert (type, integer_one_node);
1570 case BIT_AND_EXPR:
1571 return fold_convert (type, integer_minus_one_node);
1573 case MAX_EXPR:
1574 if (SCALAR_FLOAT_TYPE_P (type))
1576 REAL_VALUE_TYPE max, min;
1577 if (HONOR_INFINITIES (TYPE_MODE (type)))
1579 real_inf (&max);
1580 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
1582 else
1583 real_maxval (&min, 1, TYPE_MODE (type));
1584 return build_real (type, min);
1586 else
1588 gcc_assert (INTEGRAL_TYPE_P (type));
1589 return TYPE_MIN_VALUE (type);
1592 case MIN_EXPR:
1593 if (SCALAR_FLOAT_TYPE_P (type))
1595 REAL_VALUE_TYPE max;
1596 if (HONOR_INFINITIES (TYPE_MODE (type)))
1597 real_inf (&max);
1598 else
1599 real_maxval (&max, 0, TYPE_MODE (type));
1600 return build_real (type, max);
1602 else
1604 gcc_assert (INTEGRAL_TYPE_P (type));
1605 return TYPE_MAX_VALUE (type);
1608 default:
1609 gcc_unreachable ();
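/* Examples (illustrative only): omp_reduction_init yields 0 for
   reduction(+:x) or reduction(|:x), 1 for reduction(*:x), ~0 for
   reduction(&:x), and for reduction(max:x) either -inf (or the most
   negative finite value when infinities are not honored) on floats or
   TYPE_MIN_VALUE on integers; reduction(min:x) gets the corresponding
   maxima.  */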
1613 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
1614 from the receiver (aka child) side and initializers for REFERENCE_TYPE
1615 private variables. Initialization statements go in ILIST, while calls
1616 to destructors go in DLIST. */
1618 static void
1619 lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
1620 omp_context *ctx)
1622 tree_stmt_iterator diter;
1623 tree c, dtor, copyin_seq, x, ptr;
1624 bool copyin_by_ref = false;
1625 bool lastprivate_firstprivate = false;
1626 int pass;
1628 *dlist = alloc_stmt_list ();
1629 diter = tsi_start (*dlist);
1630 copyin_seq = NULL;
1632 /* Do all the fixed sized types in the first pass, and the variable sized
1633 types in the second pass. This makes sure that the scalar arguments to
1634 the variable sized types are processed before we use them in the
1635 variable sized operations. */
1636 for (pass = 0; pass < 2; ++pass)
1638 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1640 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1641 tree var, new_var;
1642 bool by_ref;
1644 switch (c_kind)
1646 case OMP_CLAUSE_PRIVATE:
1647 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
1648 continue;
1649 break;
1650 case OMP_CLAUSE_SHARED:
1651 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
1653 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
1654 continue;
1656 case OMP_CLAUSE_FIRSTPRIVATE:
1657 case OMP_CLAUSE_COPYIN:
1658 case OMP_CLAUSE_REDUCTION:
1659 break;
1660 case OMP_CLAUSE_LASTPRIVATE:
1661 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1663 lastprivate_firstprivate = true;
1664 if (pass != 0)
1665 continue;
1667 break;
1668 default:
1669 continue;
1672 new_var = var = OMP_CLAUSE_DECL (c);
1673 if (c_kind != OMP_CLAUSE_COPYIN)
1674 new_var = lookup_decl (var, ctx);
1676 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
1678 if (pass != 0)
1679 continue;
1681 else if (is_variable_sized (var))
1683 /* For variable sized types, we need to allocate the
1684 actual storage here. Call alloca and store the
1685 result in the pointer decl that we created elsewhere. */
1686 if (pass == 0)
1687 continue;
1689 ptr = DECL_VALUE_EXPR (new_var);
1690 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
1691 ptr = TREE_OPERAND (ptr, 0);
1692 gcc_assert (DECL_P (ptr));
1694 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
1695 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1696 x = fold_convert (TREE_TYPE (ptr), x);
1697 x = build_gimple_modify_stmt (ptr, x);
1698 gimplify_and_add (x, ilist);
1700 else if (is_reference (var))
1702 /* For references that are being privatized for Fortran,
1703 allocate new backing storage for the new pointer
1704 variable. This allows us to avoid changing all the
1705 code that expects a pointer to something that expects
1706 a direct variable. Note that this doesn't apply to
1707 C++, since reference types are disallowed in data
1708 sharing clauses there, except for NRV optimized
1709 return values. */
1710 if (pass == 0)
1711 continue;
1713 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
1714 if (TREE_CONSTANT (x))
1716 const char *name = NULL;
1717 if (DECL_NAME (var))
1718 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
1720 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
1721 name);
1722 gimple_add_tmp_var (x);
1723 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
1725 else
1727 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1728 x = fold_convert (TREE_TYPE (new_var), x);
1731 x = build_gimple_modify_stmt (new_var, x);
1732 gimplify_and_add (x, ilist);
1734 new_var = build_fold_indirect_ref (new_var);
1736 else if (c_kind == OMP_CLAUSE_REDUCTION
1737 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1739 if (pass == 0)
1740 continue;
1742 else if (pass != 0)
1743 continue;
1745 switch (OMP_CLAUSE_CODE (c))
1747 case OMP_CLAUSE_SHARED:
1748 /* Shared global vars are just accessed directly. */
1749 if (is_global_var (new_var))
1750 break;
1751 /* Set up the DECL_VALUE_EXPR for shared variables now. This
1752 needs to be delayed until after fixup_child_record_type so
1753 that we get the correct type during the dereference. */
1754 by_ref = use_pointer_for_field (var, true);
1755 x = build_receiver_ref (var, by_ref, ctx);
1756 SET_DECL_VALUE_EXPR (new_var, x);
1757 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
1759 /* ??? If VAR is not passed by reference, and the variable
1760 hasn't been initialized yet, then we'll get a warning for
1761 the store into the omp_data_s structure. Ideally, we'd be
1762 able to notice this and not store anything at all, but
1763 we're generating code too early. Suppress the warning. */
1764 if (!by_ref)
1765 TREE_NO_WARNING (var) = 1;
1766 break;
1768 case OMP_CLAUSE_LASTPRIVATE:
1769 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1770 break;
1771 /* FALLTHRU */
1773 case OMP_CLAUSE_PRIVATE:
1774 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
1775 if (x)
1776 gimplify_and_add (x, ilist);
1777 /* FALLTHRU */
1779 do_dtor:
1780 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
1781 if (x)
1783 dtor = x;
1784 gimplify_stmt (&dtor);
1785 tsi_link_before (&diter, dtor, TSI_SAME_STMT);
1787 break;
1789 case OMP_CLAUSE_FIRSTPRIVATE:
1790 x = build_outer_var_ref (var, ctx);
1791 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
1792 gimplify_and_add (x, ilist);
1793 goto do_dtor;
1794 break;
1796 case OMP_CLAUSE_COPYIN:
1797 by_ref = use_pointer_for_field (var, false);
1798 x = build_receiver_ref (var, by_ref, ctx);
1799 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
1800 append_to_statement_list (x, &copyin_seq);
1801 copyin_by_ref |= by_ref;
1802 break;
1804 case OMP_CLAUSE_REDUCTION:
1805 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1807 gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
1808 OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
1810 else
1812 x = omp_reduction_init (c, TREE_TYPE (new_var));
1813 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
1814 x = build_gimple_modify_stmt (new_var, x);
1815 gimplify_and_add (x, ilist);
1817 break;
1819 default:
1820 gcc_unreachable ();
1825 /* The copyin sequence is not to be executed by the main thread, since
1826 that would result in self-copies. Perhaps not visible to scalars,
1827 but it certainly is to C++ operator=. */
1828 if (copyin_seq)
1830 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1831 x = build2 (NE_EXPR, boolean_type_node, x,
1832 build_int_cst (TREE_TYPE (x), 0));
1833 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
1834 gimplify_and_add (x, ilist);
1837 /* If any copyin variable is passed by reference, we must ensure the
1838 master thread doesn't modify it before it is copied over in all
1839 threads. Similarly for variables in both firstprivate and
1840 lastprivate clauses we need to ensure the lastprivate copying
1841 happens after firstprivate copying in all threads. */
1842 if (copyin_by_ref || lastprivate_firstprivate)
1843 gimplify_and_add (build_omp_barrier (), ilist);
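/* Schematic output (illustrative, not generated verbatim): for
   firstprivate(x) on a parallel, ILIST receives roughly

     x = .omp_data_i->x;              <- x here is the child's private copy

   and the whole copyin sequence is wrapped as

     if (omp_get_thread_num () != 0)  <- skip the self-copy in the master
       t = .omp_data_i->t;

   followed by a GOMP_barrier () when any copyin variable was passed by
   reference or when firstprivate/lastprivate ordering must be enforced.  */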
1847 /* Generate code to implement the LASTPRIVATE clauses. This is used for
1848 both parallel and workshare constructs. PREDICATE may be NULL if it's
1849 always true. */
1851 static void
1852 lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
1853 omp_context *ctx)
1855 tree sub_list, x, c;
1857 /* Early exit if there are no lastprivate clauses. */
1858 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
1859 if (clauses == NULL)
1861 /* If this was a workshare construct, see if it had been combined
1862 with its parallel. In that case, look for the clauses on the
1863 parallel statement itself. */
1864 if (is_parallel_ctx (ctx))
1865 return;
1867 ctx = ctx->outer;
1868 if (ctx == NULL || !is_parallel_ctx (ctx))
1869 return;
1871 clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
1872 OMP_CLAUSE_LASTPRIVATE);
1873 if (clauses == NULL)
1874 return;
1877 sub_list = alloc_stmt_list ();
1879 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1881 tree var, new_var;
1883 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
1884 continue;
1886 var = OMP_CLAUSE_DECL (c);
1887 new_var = lookup_decl (var, ctx);
1889 x = build_outer_var_ref (var, ctx);
1890 if (is_reference (var))
1891 new_var = build_fold_indirect_ref (new_var);
1892 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
1893 append_to_statement_list (x, &sub_list);
1896 if (predicate)
1897 x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
1898 else
1899 x = sub_list;
1901 gimplify_and_add (x, stmt_list);
1905 /* Generate code to implement the REDUCTION clauses. */
1907 static void
1908 lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
1910 tree sub_list = NULL, x, c;
1911 int count = 0;
1913 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
1914 update in that case, otherwise use a lock. */
1915 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
1916 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1918 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1920 /* Never use OMP_ATOMIC for array reductions. */
1921 count = -1;
1922 break;
1924 count++;
1927 if (count == 0)
1928 return;
1930 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1932 tree var, ref, new_var;
1933 enum tree_code code;
1935 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1936 continue;
1938 var = OMP_CLAUSE_DECL (c);
1939 new_var = lookup_decl (var, ctx);
1940 if (is_reference (var))
1941 new_var = build_fold_indirect_ref (new_var);
1942 ref = build_outer_var_ref (var, ctx);
1943 code = OMP_CLAUSE_REDUCTION_CODE (c);
1945 /* reduction(-:var) sums up the partial results, so it acts
1946 identically to reduction(+:var). */
1947 if (code == MINUS_EXPR)
1948 code = PLUS_EXPR;
1950 if (count == 1)
1952 tree addr = build_fold_addr_expr (ref);
1954 addr = save_expr (addr);
1955 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
1956 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
1957 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
1958 gimplify_and_add (x, stmt_list);
1959 return;
1962 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1964 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
1966 if (is_reference (var))
1967 ref = build_fold_addr_expr (ref);
1968 SET_DECL_VALUE_EXPR (placeholder, ref);
1969 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
1970 gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
1971 OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
1972 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
1974 else
1976 x = build2 (code, TREE_TYPE (ref), ref, new_var);
1977 ref = build_outer_var_ref (var, ctx);
1978 x = build_gimple_modify_stmt (ref, x);
1979 append_to_statement_list (x, &sub_list);
1983 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
1984 gimplify_and_add (x, stmt_list);
1986 gimplify_and_add (sub_list, stmt_list);
1988 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
1989 gimplify_and_add (x, stmt_list);
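/* Schematically (illustrative only): a single scalar reduction(+:s)
   is lowered to an atomic update of the outer variable,

     #pragma omp atomic
     s = s + s.priv;

   while several reductions, or an array reduction with a placeholder,
   have their merges emitted between GOMP_atomic_start () and
   GOMP_atomic_end () calls instead.  */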
1993 /* Generate code to implement the COPYPRIVATE clauses. */
1995 static void
1996 lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
1997 omp_context *ctx)
1999 tree c;
2001 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2003 tree var, ref, x;
2004 bool by_ref;
2006 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2007 continue;
2009 var = OMP_CLAUSE_DECL (c);
2010 by_ref = use_pointer_for_field (var, false);
2012 ref = build_sender_ref (var, ctx);
2013 x = lookup_decl_in_outer_ctx (var, ctx);
2014 x = by_ref ? build_fold_addr_expr (x) : x;
2015 x = build_gimple_modify_stmt (ref, x);
2016 gimplify_and_add (x, slist);
2018 ref = build_receiver_ref (var, by_ref, ctx);
2019 if (is_reference (var))
2021 ref = build_fold_indirect_ref (ref);
2022 var = build_fold_indirect_ref (var);
2024 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2025 gimplify_and_add (x, rlist);
2030 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2031 and REDUCTION from the sender (aka parent) side. */
2033 static void
2034 lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
2036 tree c;
2038 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2040 tree val, ref, x, var;
2041 bool by_ref, do_in = false, do_out = false;
2043 switch (OMP_CLAUSE_CODE (c))
2045 case OMP_CLAUSE_FIRSTPRIVATE:
2046 case OMP_CLAUSE_COPYIN:
2047 case OMP_CLAUSE_LASTPRIVATE:
2048 case OMP_CLAUSE_REDUCTION:
2049 break;
2050 default:
2051 continue;
2054 val = OMP_CLAUSE_DECL (c);
2055 var = lookup_decl_in_outer_ctx (val, ctx);
2057 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2058 && is_global_var (var))
2059 continue;
2060 if (is_variable_sized (val))
2061 continue;
2062 by_ref = use_pointer_for_field (val, false);
2064 switch (OMP_CLAUSE_CODE (c))
2066 case OMP_CLAUSE_FIRSTPRIVATE:
2067 case OMP_CLAUSE_COPYIN:
2068 do_in = true;
2069 break;
2071 case OMP_CLAUSE_LASTPRIVATE:
2072 if (by_ref || is_reference (val))
2074 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2075 continue;
2076 do_in = true;
2078 else
2079 do_out = true;
2080 break;
2082 case OMP_CLAUSE_REDUCTION:
2083 do_in = true;
2084 do_out = !(by_ref || is_reference (val));
2085 break;
2087 default:
2088 gcc_unreachable ();
2091 if (do_in)
2093 ref = build_sender_ref (val, ctx);
2094 x = by_ref ? build_fold_addr_expr (var) : var;
2095 x = build_gimple_modify_stmt (ref, x);
2096 gimplify_and_add (x, ilist);
2099 if (do_out)
2101 ref = build_sender_ref (val, ctx);
2102 x = build_gimple_modify_stmt (var, ref);
2103 gimplify_and_add (x, olist);
2108 /* Generate code to implement SHARED from the sender (aka parent) side.
2109 This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
2110 got automatically shared. */
2112 static void
2113 lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
2115 tree var, ovar, nvar, f, x;
2117 if (ctx->record_type == NULL)
2118 return;
2120 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
2122 ovar = DECL_ABSTRACT_ORIGIN (f);
2123 nvar = maybe_lookup_decl (ovar, ctx);
2124 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2125 continue;
2127 /* If CTX is a nested parallel directive, find the immediately
2128 enclosing parallel or workshare construct that contains a
2129 mapping for OVAR. */
2130 var = lookup_decl_in_outer_ctx (ovar, ctx);
2132 if (use_pointer_for_field (ovar, true))
2134 x = build_sender_ref (ovar, ctx);
2135 var = build_fold_addr_expr (var);
2136 x = build_gimple_modify_stmt (x, var);
2137 gimplify_and_add (x, ilist);
2139 else
2141 x = build_sender_ref (ovar, ctx);
2142 x = build_gimple_modify_stmt (x, var);
2143 gimplify_and_add (x, ilist);
2145 x = build_sender_ref (ovar, ctx);
2146 x = build_gimple_modify_stmt (var, x);
2147 gimplify_and_add (x, olist);
2152 /* Build the function calls to GOMP_parallel_start etc to actually
2153 generate the parallel operation. REGION is the parallel region
2154 being expanded. BB is the block in which to insert the code. WS_ARGS
2155 will be set if this is a call to a combined parallel+workshare
2156 construct; it contains the list of additional arguments needed by
2157 the workshare construct. */
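/* As a sketch, for a plain (non-combined) parallel region the calls
   inserted into BB amount to

     GOMP_parallel_start (child_fn, &.OMP_DATA_O, num_threads);
     child_fn (&.OMP_DATA_O);
     GOMP_parallel_end ();

   where CHILD_FN is OMP_PARALLEL_FN (ENTRY_STMT) and &.OMP_DATA_O is the
   address of the data-sharing record, or NULL when no data is passed.  */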
2159 static void
2160 expand_parallel_call (struct omp_region *region, basic_block bb,
2161 tree entry_stmt, tree ws_args)
2163 tree t, t1, t2, val, cond, c, clauses;
2164 block_stmt_iterator si;
2165 int start_ix;
2167 clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
2169 /* Determine what flavor of GOMP_parallel_start we will be
2170 emitting. */
2171 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2172 if (is_combined_parallel (region))
2174 switch (region->inner->type)
2176 case OMP_FOR:
2177 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2178 + region->inner->sched_kind;
2179 break;
2180 case OMP_SECTIONS:
2181 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2182 break;
2183 default:
2184 gcc_unreachable ();
2188 /* By default, the value of NUM_THREADS is zero (selected at run time)
2189 and there is no conditional. */
2190 cond = NULL_TREE;
2191 val = build_int_cst (unsigned_type_node, 0);
2193 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2194 if (c)
2195 cond = OMP_CLAUSE_IF_EXPR (c);
2197 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2198 if (c)
2199 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2201 /* Ensure 'val' is of the correct type. */
2202 val = fold_convert (unsigned_type_node, val);
2204 /* If we found the clause 'if (cond)', build either
2205 (cond == 0) or (cond ? val : 1u). */
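/* For example (illustration only): for
     #pragma omp parallel if (n > 100) num_threads (4)
   the value passed to the runtime becomes (n > 100 ? 4 : 1), and with no
   num_threads clause it becomes (n > 100 ? 0 : 1), where 0 means "let the
   runtime choose".  */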
2206 if (cond)
2208 block_stmt_iterator si;
2210 cond = gimple_boolify (cond);
2212 if (integer_zerop (val))
2213 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2214 build_int_cst (TREE_TYPE (cond), 0));
2215 else
2217 basic_block cond_bb, then_bb, else_bb;
2218 edge e, e_then, e_else;
2219 tree t, tmp_then, tmp_else, tmp_join, tmp_var;
2221 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2222 if (gimple_in_ssa_p (cfun))
2224 tmp_then = make_ssa_name (tmp_var, NULL_TREE);
2225 tmp_else = make_ssa_name (tmp_var, NULL_TREE);
2226 tmp_join = make_ssa_name (tmp_var, NULL_TREE);
2228 else
2230 tmp_then = tmp_var;
2231 tmp_else = tmp_var;
2232 tmp_join = tmp_var;
2235 e = split_block (bb, NULL);
2236 cond_bb = e->src;
2237 bb = e->dest;
2238 remove_edge (e);
2240 then_bb = create_empty_bb (cond_bb);
2241 else_bb = create_empty_bb (then_bb);
2242 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2243 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2245 t = build3 (COND_EXPR, void_type_node,
2246 cond, NULL_TREE, NULL_TREE);
2248 si = bsi_start (cond_bb);
2249 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2251 si = bsi_start (then_bb);
2252 t = build_gimple_modify_stmt (tmp_then, val);
2253 if (gimple_in_ssa_p (cfun))
2254 SSA_NAME_DEF_STMT (tmp_then) = t;
2255 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2257 si = bsi_start (else_bb);
2258 t = build_gimple_modify_stmt (tmp_else,
2259 build_int_cst (unsigned_type_node, 1));
2260 if (gimple_in_ssa_p (cfun))
2261 SSA_NAME_DEF_STMT (tmp_else) = t;
2262 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2264 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2265 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2266 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2267 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2269 if (gimple_in_ssa_p (cfun))
2271 tree phi = create_phi_node (tmp_join, bb);
2272 SSA_NAME_DEF_STMT (tmp_join) = phi;
2273 add_phi_arg (phi, tmp_then, e_then);
2274 add_phi_arg (phi, tmp_else, e_else);
2277 val = tmp_join;
2280 si = bsi_start (bb);
2281 val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
2282 false, BSI_CONTINUE_LINKING);
2285 si = bsi_last (bb);
2286 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2287 if (t == NULL)
2288 t1 = null_pointer_node;
2289 else
2290 t1 = build_fold_addr_expr (t);
2291 t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
2293 if (ws_args)
2295 tree args = tree_cons (NULL, t2,
2296 tree_cons (NULL, t1,
2297 tree_cons (NULL, val, ws_args)));
2298 t = build_function_call_expr (built_in_decls[start_ix], args);
2300 else
2301 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
2303 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2304 false, BSI_CONTINUE_LINKING);
2306 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2307 if (t == NULL)
2308 t = null_pointer_node;
2309 else
2310 t = build_fold_addr_expr (t);
2311 t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
2312 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2313 false, BSI_CONTINUE_LINKING);
2315 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
2316 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2317 false, BSI_CONTINUE_LINKING);
2321 /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
2322 handler. This prevents programs from violating the structured
2323 block semantics with throws. */
2325 static void
2326 maybe_catch_exception (tree *stmt_p)
2328 tree f, t;
2330 if (!flag_exceptions)
2331 return;
2333 if (lang_protect_cleanup_actions)
2334 t = lang_protect_cleanup_actions ();
2335 else
2336 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
2337 f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
2338 EH_FILTER_MUST_NOT_THROW (f) = 1;
2339 gimplify_and_add (t, &EH_FILTER_FAILURE (f));
2341 t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
2342 append_to_statement_list (f, &TREE_OPERAND (t, 1));
2344 *stmt_p = NULL;
2345 append_to_statement_list (t, stmt_p);
2348 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
2350 static tree
2351 list2chain (tree list)
2353 tree t;
2355 for (t = list; t; t = TREE_CHAIN (t))
2357 tree var = TREE_VALUE (t);
2358 if (TREE_CHAIN (t))
2359 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
2360 else
2361 TREE_CHAIN (var) = NULL_TREE;
2364 return list ? TREE_VALUE (list) : NULL_TREE;
2368 /* Remove barriers in REGION->EXIT's block. Note that this is only
2369 valid for OMP_PARALLEL regions. Since the end of a parallel region
2370 is an implicit barrier, any workshare inside the OMP_PARALLEL that
2371 left a barrier at the end of the OMP_PARALLEL region can now be
2372 removed. */
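/* For illustration: in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);
     }

   the worksharing loop ends with an implicit barrier, but because the end
   of the enclosing parallel region is itself a barrier, the loop's barrier
   is redundant and its OMP_RETURN can be marked nowait.  BODY, I and N are
   placeholders.  */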
2374 static void
2375 remove_exit_barrier (struct omp_region *region)
2377 block_stmt_iterator si;
2378 basic_block exit_bb;
2379 edge_iterator ei;
2380 edge e;
2381 tree t;
2383 exit_bb = region->exit;
2385 /* If the parallel region doesn't return, we don't have a REGION->EXIT
2386 block at all. */
2387 if (! exit_bb)
2388 return;
2390 /* The last insn in the block will be the parallel's OMP_RETURN. The
2391 workshare's OMP_RETURN will be in a preceding block. The kinds of
2392 statements that can appear in between are extremely limited -- no
2393 memory operations at all. Here, we allow nothing at all, so the
2394 only thing we allow to precede this OMP_RETURN is a label. */
2395 si = bsi_last (exit_bb);
2396 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2397 bsi_prev (&si);
2398 if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
2399 return;
2401 FOR_EACH_EDGE (e, ei, exit_bb->preds)
2403 si = bsi_last (e->src);
2404 if (bsi_end_p (si))
2405 continue;
2406 t = bsi_stmt (si);
2407 if (TREE_CODE (t) == OMP_RETURN)
2408 OMP_RETURN_NOWAIT (t) = 1;
2412 static void
2413 remove_exit_barriers (struct omp_region *region)
2415 if (region->type == OMP_PARALLEL)
2416 remove_exit_barrier (region);
2418 if (region->inner)
2420 region = region->inner;
2421 remove_exit_barriers (region);
2422 while (region->next)
2424 region = region->next;
2425 remove_exit_barriers (region);
2430 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
2431 calls. These can't be declared as const functions, but
2432 within one parallel body they are constant, so they can be
2433 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
2434 which are declared const. */
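/* For example, inside the outlined body of a parallel region a call such
   as

     tid = omp_get_thread_num ();

   is redirected to __builtin_omp_get_thread_num ().  Because the builtin
   is declared const, later passes may CSE or hoist it within the parallel
   body.  */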
2436 static void
2437 optimize_omp_library_calls (void)
2439 basic_block bb;
2440 block_stmt_iterator bsi;
2441 tree thr_num_id
2442 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
2443 tree num_thr_id
2444 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
2446 FOR_EACH_BB (bb)
2447 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
2449 tree stmt = bsi_stmt (bsi);
2450 tree call = get_call_expr_in (stmt);
2451 tree decl;
2453 if (call
2454 && (decl = get_callee_fndecl (call))
2455 && DECL_EXTERNAL (decl)
2456 && TREE_PUBLIC (decl)
2457 && DECL_INITIAL (decl) == NULL)
2459 tree built_in;
2461 if (DECL_NAME (decl) == thr_num_id)
2462 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
2463 else if (DECL_NAME (decl) == num_thr_id)
2464 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
2465 else
2466 continue;
2468 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
2469 || call_expr_nargs (call) != 0)
2470 continue;
2472 if (flag_exceptions && !TREE_NOTHROW (decl))
2473 continue;
2475 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
2476 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
2477 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
2478 continue;
2480 CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
2485 /* Expand the OpenMP parallel directive starting at REGION. */
2487 static void
2488 expand_omp_parallel (struct omp_region *region)
2490 basic_block entry_bb, exit_bb, new_bb;
2491 struct function *child_cfun;
2492 tree child_fn, block, t, ws_args;
2493 block_stmt_iterator si;
2494 tree entry_stmt;
2495 edge e;
2497 entry_stmt = last_stmt (region->entry);
2498 child_fn = OMP_PARALLEL_FN (entry_stmt);
2499 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
2501 entry_bb = region->entry;
2502 exit_bb = region->exit;
2504 if (is_combined_parallel (region))
2505 ws_args = region->ws_args;
2506 else
2507 ws_args = NULL_TREE;
2509 if (child_cfun->cfg)
2511 /* Due to inlining, it may happen that we have already outlined
2512 the region, in which case all we need to do is make the
2513 sub-graph unreachable and emit the parallel call. */
2514 edge entry_succ_e, exit_succ_e;
2515 block_stmt_iterator si;
2517 entry_succ_e = single_succ_edge (entry_bb);
2519 si = bsi_last (entry_bb);
2520 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
2521 bsi_remove (&si, true);
2523 new_bb = entry_bb;
2524 if (exit_bb)
2526 exit_succ_e = single_succ_edge (exit_bb);
2527 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
2529 remove_edge_and_dominated_blocks (entry_succ_e);
2531 else
2533 /* If the parallel region needs data sent from the parent
2534 function, then the very first statement (except possible
2535 tree profile counter updates) of the parallel body
2536 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
2537 &.OMP_DATA_O is passed as an argument to the child function,
2538 we need to replace it with the argument as seen by the child
2539 function.
2541 In most cases, this will end up being the identity assignment
2542 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
2543 a function call that has been inlined, the original PARM_DECL
2544 .OMP_DATA_I may have been converted into a different local
2545 variable, in which case we need to keep the assignment. */
2546 if (OMP_PARALLEL_DATA_ARG (entry_stmt))
2548 basic_block entry_succ_bb = single_succ (entry_bb);
2549 block_stmt_iterator si;
2550 tree parcopy_stmt = NULL_TREE, arg, narg;
2552 for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
2554 tree stmt, arg;
2556 gcc_assert (!bsi_end_p (si));
2557 stmt = bsi_stmt (si);
2558 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2559 continue;
2561 arg = GIMPLE_STMT_OPERAND (stmt, 1);
2562 STRIP_NOPS (arg);
2563 if (TREE_CODE (arg) == ADDR_EXPR
2564 && TREE_OPERAND (arg, 0)
2565 == OMP_PARALLEL_DATA_ARG (entry_stmt))
2567 parcopy_stmt = stmt;
2568 break;
2572 gcc_assert (parcopy_stmt != NULL_TREE);
2573 arg = DECL_ARGUMENTS (child_fn);
2575 if (!gimple_in_ssa_p (cfun))
2577 if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
2578 bsi_remove (&si, true);
2579 else
2580 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
2582 else
2584 /* If we are in ssa form, we must load the value from the default
2585 definition of the argument. That default definition should not exist
2586 yet, since the argument is never used uninitialized. */
2587 gcc_assert (gimple_default_def (cfun, arg) == NULL);
2588 narg = make_ssa_name (arg, build_empty_stmt ());
2589 set_default_def (arg, narg);
2590 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
2591 update_stmt (parcopy_stmt);
2595 /* Declare local variables needed in CHILD_CFUN. */
2596 block = DECL_INITIAL (child_fn);
2597 BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
2598 DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
2600 /* Reset DECL_CONTEXT on function arguments. */
2601 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
2602 DECL_CONTEXT (t) = child_fn;
2604 /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
2605 child function. */
2606 si = bsi_last (entry_bb);
2607 t = bsi_stmt (si);
2608 gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
2609 bsi_remove (&si, true);
2610 e = split_block (entry_bb, t);
2611 entry_bb = e->dest;
2612 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
2614 /* Convert OMP_RETURN into a RETURN_EXPR. */
2615 if (exit_bb)
2617 si = bsi_last (exit_bb);
2618 gcc_assert (!bsi_end_p (si)
2619 && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2620 t = build1 (RETURN_EXPR, void_type_node, NULL);
2621 bsi_insert_after (&si, t, BSI_SAME_STMT);
2622 bsi_remove (&si, true);
2625 /* Move the parallel region into CHILD_CFUN. */
2627 if (gimple_in_ssa_p (cfun))
2629 push_cfun (child_cfun);
2630 init_tree_ssa ();
2631 init_ssa_operands ();
2632 cfun->gimple_df->in_ssa_p = true;
2633 pop_cfun ();
2635 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
2636 if (exit_bb)
2637 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
2639 /* Inform the callgraph about the new function. */
2640 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
2641 = cfun->curr_properties;
2642 cgraph_add_new_function (child_fn, true);
2644 /* Fix the callgraph edges for child_cfun. Those for cfun will be
2645 fixed in a following pass. */
2646 push_cfun (child_cfun);
2647 if (optimize)
2648 optimize_omp_library_calls ();
2649 rebuild_cgraph_edges ();
2651 /* Some EH regions might become dead, see PR34608. If
2652 pass_cleanup_cfg isn't the first pass to happen with the
2653 new child, these dead EH edges might cause problems.
2654 Clean them up now. */
2655 if (flag_exceptions)
2657 basic_block bb;
2658 tree save_current = current_function_decl;
2659 bool changed = false;
2661 current_function_decl = child_fn;
2662 FOR_EACH_BB (bb)
2663 changed |= tree_purge_dead_eh_edges (bb);
2664 if (changed)
2665 cleanup_tree_cfg ();
2666 current_function_decl = save_current;
2668 pop_cfun ();
2671 /* Emit a library call to launch the children threads. */
2672 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
2673 update_ssa (TODO_update_ssa_only_virtuals);
2677 /* A subroutine of expand_omp_for. Generate code for a parallel
2678 loop with any schedule. Given parameters:
2680 for (V = N1; V cond N2; V += STEP) BODY;
2682 where COND is "<" or ">", we generate pseudocode
2684 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2685 if (more) goto L0; else goto L3;
2686 L0:
2687 V = istart0;
2688 iend = iend0;
2689 L1:
2690 BODY;
2691 V += STEP;
2692 if (V cond iend) goto L1; else goto L2;
2693 L2:
2694 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2695 L3:
2697 If this is a combined omp parallel loop, instead of the call to
2698 GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
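/* For instance (illustration only), with schedule(dynamic, 4) the calls
   above become GOMP_loop_dynamic_start (N1, N2, STEP, 4, &istart0, &iend0)
   and GOMP_loop_dynamic_next (&istart0, &iend0); the exact pair is chosen
   by the START_FN and NEXT_FN arguments below.  */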
2700 static void
2701 expand_omp_for_generic (struct omp_region *region,
2702 struct omp_for_data *fd,
2703 enum built_in_function start_fn,
2704 enum built_in_function next_fn)
2706 tree type, istart0, iend0, iend, phi;
2707 tree t, vmain, vback;
2708 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
2709 basic_block l2_bb = NULL, l3_bb = NULL;
2710 block_stmt_iterator si;
2711 bool in_combined_parallel = is_combined_parallel (region);
2712 bool broken_loop = region->cont == NULL;
2713 edge e, ne;
2715 gcc_assert (!broken_loop || !in_combined_parallel);
2717 type = TREE_TYPE (fd->v);
2719 istart0 = create_tmp_var (long_integer_type_node, ".istart0");
2720 iend0 = create_tmp_var (long_integer_type_node, ".iend0");
2721 TREE_ADDRESSABLE (istart0) = 1;
2722 TREE_ADDRESSABLE (iend0) = 1;
2723 if (gimple_in_ssa_p (cfun))
2725 add_referenced_var (istart0);
2726 add_referenced_var (iend0);
2729 entry_bb = region->entry;
2730 cont_bb = region->cont;
2731 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2732 gcc_assert (broken_loop
2733 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2734 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2735 l1_bb = single_succ (l0_bb);
2736 if (!broken_loop)
2738 l2_bb = create_empty_bb (cont_bb);
2739 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
2740 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2742 else
2743 l2_bb = NULL;
2744 l3_bb = BRANCH_EDGE (entry_bb)->dest;
2745 exit_bb = region->exit;
2747 si = bsi_last (entry_bb);
2748 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2749 if (in_combined_parallel)
2751 /* In a combined parallel loop, emit a call to
2752 GOMP_loop_foo_next. */
2753 t = build_call_expr (built_in_decls[next_fn], 2,
2754 build_fold_addr_expr (istart0),
2755 build_fold_addr_expr (iend0));
2757 else
2759 tree t0, t1, t2, t3, t4;
2760 /* If this is not a combined parallel loop, emit a call to
2761 GOMP_loop_foo_start in ENTRY_BB. */
2762 t4 = build_fold_addr_expr (iend0);
2763 t3 = build_fold_addr_expr (istart0);
2764 t2 = fold_convert (long_integer_type_node, fd->step);
2765 t1 = fold_convert (long_integer_type_node, fd->n2);
2766 t0 = fold_convert (long_integer_type_node, fd->n1);
2767 if (fd->chunk_size)
2769 t = fold_convert (long_integer_type_node, fd->chunk_size);
2770 t = build_call_expr (built_in_decls[start_fn], 6,
2771 t0, t1, t2, t, t3, t4);
2773 else
2774 t = build_call_expr (built_in_decls[start_fn], 5,
2775 t0, t1, t2, t3, t4);
2777 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2778 true, BSI_SAME_STMT);
2779 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2780 bsi_insert_after (&si, t, BSI_SAME_STMT);
2782 /* V may be used outside of the loop (e.g., to handle lastprivate clause).
2783 If this is the case, its value is undefined if the loop is not entered
2784 at all. To handle this case, set its initial value to N1. */
2785 if (gimple_in_ssa_p (cfun))
2787 e = find_edge (entry_bb, l3_bb);
2788 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2789 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
2790 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
2792 else
2794 t = build_gimple_modify_stmt (fd->v, fd->n1);
2795 bsi_insert_before (&si, t, BSI_SAME_STMT);
2798 /* Remove the OMP_FOR statement. */
2799 bsi_remove (&si, true);
2801 /* Iteration setup for sequential loop goes in L0_BB. */
2802 si = bsi_start (l0_bb);
2803 t = fold_convert (type, istart0);
2804 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2805 false, BSI_CONTINUE_LINKING);
2806 t = build_gimple_modify_stmt (fd->v, t);
2807 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2808 if (gimple_in_ssa_p (cfun))
2809 SSA_NAME_DEF_STMT (fd->v) = t;
2811 t = fold_convert (type, iend0);
2812 iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2813 false, BSI_CONTINUE_LINKING);
2815 if (!broken_loop)
2817 /* Code to control the increment and predicate for the sequential
2818 loop goes in the CONT_BB. */
2819 si = bsi_last (cont_bb);
2820 t = bsi_stmt (si);
2821 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
2822 vmain = TREE_OPERAND (t, 1);
2823 vback = TREE_OPERAND (t, 0);
2825 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
2826 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2827 true, BSI_SAME_STMT);
2828 t = build_gimple_modify_stmt (vback, t);
2829 bsi_insert_before (&si, t, BSI_SAME_STMT);
2830 if (gimple_in_ssa_p (cfun))
2831 SSA_NAME_DEF_STMT (vback) = t;
2833 t = build2 (fd->cond_code, boolean_type_node, vback, iend);
2834 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2835 bsi_insert_before (&si, t, BSI_SAME_STMT);
2837 /* Remove OMP_CONTINUE. */
2838 bsi_remove (&si, true);
2840 /* Emit code to get the next parallel iteration in L2_BB. */
2841 si = bsi_start (l2_bb);
2843 t = build_call_expr (built_in_decls[next_fn], 2,
2844 build_fold_addr_expr (istart0),
2845 build_fold_addr_expr (iend0));
2846 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2847 false, BSI_CONTINUE_LINKING);
2848 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2849 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2852 /* Add the loop cleanup function. */
2853 si = bsi_last (exit_bb);
2854 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
2855 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
2856 else
2857 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
2858 t = build_call_expr (t, 0);
2859 bsi_insert_after (&si, t, BSI_SAME_STMT);
2860 bsi_remove (&si, true);
2862 /* Connect the new blocks. */
2863 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
2864 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
2866 if (!broken_loop)
2868 e = find_edge (cont_bb, l3_bb);
2869 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
2871 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2872 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
2873 PHI_ARG_DEF_FROM_EDGE (phi, e));
2874 remove_edge (e);
2876 find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
2877 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
2878 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
2880 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
2881 recompute_dominator (CDI_DOMINATORS, l2_bb));
2882 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
2883 recompute_dominator (CDI_DOMINATORS, l3_bb));
2884 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
2885 recompute_dominator (CDI_DOMINATORS, l0_bb));
2886 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
2887 recompute_dominator (CDI_DOMINATORS, l1_bb));
2892 /* A subroutine of expand_omp_for. Generate code for a parallel
2893 loop with static schedule and no specified chunk size. Given
2894 parameters:
2896 for (V = N1; V cond N2; V += STEP) BODY;
2898 where COND is "<" or ">", we generate pseudocode
2900 if (cond is <)
2901 adj = STEP - 1;
2902 else
2903 adj = STEP + 1;
2904 n = (adj + N2 - N1) / STEP;
2905 q = n / nthreads;
2906 q += (q * nthreads != n);
2907 s0 = q * threadid;
2908 e0 = min(s0 + q, n);
2909 V = s0 * STEP + N1;
2910 if (s0 >= e0) goto L2; else goto L0;
2911 L0:
2912 e = e0 * STEP + N1;
2913 L1:
2914 BODY;
2915 V += STEP;
2916 if (V cond e) goto L1;
2917 L2:
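/* A worked example (illustration only): for 10 iterations and 4 threads,
   n = 10 and q = 10/4 = 2; since 2*4 != 10, q becomes 3.  Thread 0 then
   runs iterations [0,3), thread 1 [3,6), thread 2 [6,9) and thread 3
   [9,10); any thread with s0 >= e0 skips the loop entirely.  */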
2920 static void
2921 expand_omp_for_static_nochunk (struct omp_region *region,
2922 struct omp_for_data *fd)
2924 tree n, q, s0, e0, e, t, nthreads, threadid;
2925 tree type, vmain, vback;
2926 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
2927 basic_block fin_bb;
2928 block_stmt_iterator si;
2930 type = TREE_TYPE (fd->v);
2932 entry_bb = region->entry;
2933 cont_bb = region->cont;
2934 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2935 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2936 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2937 body_bb = single_succ (seq_start_bb);
2938 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
2939 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2940 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
2941 exit_bb = region->exit;
2943 /* Iteration space partitioning goes in ENTRY_BB. */
2944 si = bsi_last (entry_bb);
2945 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2947 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
2948 t = fold_convert (type, t);
2949 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2950 true, BSI_SAME_STMT);
2952 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2953 t = fold_convert (type, t);
2954 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2955 true, BSI_SAME_STMT);
2957 fd->n1 = force_gimple_operand_bsi (&si,
2958 fold_convert (type, fd->n1),
2959 true, NULL_TREE,
2960 true, BSI_SAME_STMT);
2962 fd->n2 = force_gimple_operand_bsi (&si,
2963 fold_convert (type, fd->n2),
2964 true, NULL_TREE,
2965 true, BSI_SAME_STMT);
2967 fd->step = force_gimple_operand_bsi (&si,
2968 fold_convert (type, fd->step),
2969 true, NULL_TREE,
2970 true, BSI_SAME_STMT);
2972 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
2973 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
2974 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
2975 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
2976 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
2977 t = fold_convert (type, t);
2978 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2980 t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
2981 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2983 t = fold_build2 (MULT_EXPR, type, q, nthreads);
2984 t = fold_build2 (NE_EXPR, type, t, n);
2985 t = fold_build2 (PLUS_EXPR, type, q, t);
2986 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2988 t = build2 (MULT_EXPR, type, q, threadid);
2989 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2991 t = fold_build2 (PLUS_EXPR, type, s0, q);
2992 t = fold_build2 (MIN_EXPR, type, t, n);
2993 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2995 t = fold_convert (type, s0);
2996 t = fold_build2 (MULT_EXPR, type, t, fd->step);
2997 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
2998 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2999 true, BSI_SAME_STMT);
3000 t = build_gimple_modify_stmt (fd->v, t);
3001 bsi_insert_before (&si, t, BSI_SAME_STMT);
3002 if (gimple_in_ssa_p (cfun))
3003 SSA_NAME_DEF_STMT (fd->v) = t;
3005 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
3006 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3007 bsi_insert_before (&si, t, BSI_SAME_STMT);
3009 /* Remove the OMP_FOR statement. */
3010 bsi_remove (&si, true);
3012 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3013 si = bsi_start (seq_start_bb);
3015 t = fold_convert (type, e0);
3016 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3017 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3018 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3019 false, BSI_CONTINUE_LINKING);
3021 /* The code controlling the sequential loop replaces the OMP_CONTINUE. */
3022 si = bsi_last (cont_bb);
3023 t = bsi_stmt (si);
3024 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
3025 vmain = TREE_OPERAND (t, 1);
3026 vback = TREE_OPERAND (t, 0);
3028 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
3029 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3030 true, BSI_SAME_STMT);
3031 t = build_gimple_modify_stmt (vback, t);
3032 bsi_insert_before (&si, t, BSI_SAME_STMT);
3033 if (gimple_in_ssa_p (cfun))
3034 SSA_NAME_DEF_STMT (vback) = t;
3036 t = build2 (fd->cond_code, boolean_type_node, vback, e);
3037 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3038 bsi_insert_before (&si, t, BSI_SAME_STMT);
3040 /* Remove the OMP_CONTINUE statement. */
3041 bsi_remove (&si, true);
3043 /* Replace the OMP_RETURN with a barrier, or nothing. */
3044 si = bsi_last (exit_bb);
3045 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3046 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3047 false, BSI_SAME_STMT);
3048 bsi_remove (&si, true);
3050 /* Connect all the blocks. */
3051 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
3052 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
3054 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3055 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3057 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
3058 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3059 recompute_dominator (CDI_DOMINATORS, body_bb));
3060 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3061 recompute_dominator (CDI_DOMINATORS, fin_bb));
3065 /* A subroutine of expand_omp_for. Generate code for a parallel
3066 loop with static schedule and a specified chunk size. Given
3067 parameters:
3069 for (V = N1; V cond N2; V += STEP) BODY;
3071 where COND is "<" or ">", we generate pseudocode
3073 if (cond is <)
3074 adj = STEP - 1;
3075 else
3076 adj = STEP + 1;
3077 n = (adj + N2 - N1) / STEP;
3078 trip = 0;
3079 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3080 here so that V is defined
3081 if the loop is not entered
3082 L0:
3083 s0 = (trip * nthreads + threadid) * CHUNK;
3084 e0 = min(s0 + CHUNK, n);
3085 if (s0 < n) goto L1; else goto L4;
3086 L1:
3087 V = s0 * STEP + N1;
3088 e = e0 * STEP + N1;
3089 L2:
3090 BODY;
3091 V += STEP;
3092 if (V cond e) goto L2; else goto L3;
3093 L3:
3094 trip += 1;
3095 goto L0;
3096 L4:
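/* A worked example (illustration only): with 10 iterations, 2 threads and
   CHUNK = 3, trip 0 gives thread 0 the range s0 = 0, e0 = 3 and thread 1
   the range s0 = 3, e0 = 6; trip 1 gives thread 0 s0 = 6, e0 = 9 and
   thread 1 s0 = 9, e0 = 10; on trip 2 both threads see s0 >= n and are
   done.  */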
3099 static void
3100 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
3102 tree n, s0, e0, e, t, phi, nphi, args;
3103 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
3104 tree type, cont, v_main, v_back, v_extra;
3105 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
3106 basic_block trip_update_bb, cont_bb, fin_bb;
3107 block_stmt_iterator si;
3108 edge se, re, ene;
3110 type = TREE_TYPE (fd->v);
3112 entry_bb = region->entry;
3113 se = split_block (entry_bb, last_stmt (entry_bb));
3114 entry_bb = se->src;
3115 iter_part_bb = se->dest;
3116 cont_bb = region->cont;
3117 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
3118 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
3119 == FALLTHRU_EDGE (cont_bb)->dest);
3120 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
3121 body_bb = single_succ (seq_start_bb);
3122 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
3123 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3124 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
3125 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
3126 exit_bb = region->exit;
3128 /* Trip and adjustment setup goes in ENTRY_BB. */
3129 si = bsi_last (entry_bb);
3130 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
3132 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
3133 t = fold_convert (type, t);
3134 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3135 true, BSI_SAME_STMT);
3137 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
3138 t = fold_convert (type, t);
3139 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3140 true, BSI_SAME_STMT);
3142 fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
3143 true, NULL_TREE,
3144 true, BSI_SAME_STMT);
3145 fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
3146 true, NULL_TREE,
3147 true, BSI_SAME_STMT);
3148 fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
3149 true, NULL_TREE,
3150 true, BSI_SAME_STMT);
3151 fd->chunk_size
3152 = force_gimple_operand_bsi (&si, fold_convert (type,
3153 fd->chunk_size),
3154 true, NULL_TREE,
3155 true, BSI_SAME_STMT);
3157 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
3158 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
3159 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
3160 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
3161 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
3162 t = fold_convert (type, t);
3163 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3164 true, BSI_SAME_STMT);
3166 trip_var = create_tmp_var (type, ".trip");
3167 if (gimple_in_ssa_p (cfun))
3169 add_referenced_var (trip_var);
3170 trip_init = make_ssa_name (trip_var, NULL_TREE);
3171 trip_main = make_ssa_name (trip_var, NULL_TREE);
3172 trip_back = make_ssa_name (trip_var, NULL_TREE);
3174 else
3176 trip_init = trip_var;
3177 trip_main = trip_var;
3178 trip_back = trip_var;
3181 t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
3182 bsi_insert_before (&si, t, BSI_SAME_STMT);
3183 if (gimple_in_ssa_p (cfun))
3184 SSA_NAME_DEF_STMT (trip_init) = t;
3186 t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
3187 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3188 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3189 v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3190 true, BSI_SAME_STMT);
3192 /* Remove the OMP_FOR. */
3193 bsi_remove (&si, true);
3195 /* Iteration space partitioning goes in ITER_PART_BB. */
3196 si = bsi_last (iter_part_bb);
3198 t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
3199 t = fold_build2 (PLUS_EXPR, type, t, threadid);
3200 t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
3201 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3202 false, BSI_CONTINUE_LINKING);
3204 t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
3205 t = fold_build2 (MIN_EXPR, type, t, n);
3206 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3207 false, BSI_CONTINUE_LINKING);
3209 t = build2 (LT_EXPR, boolean_type_node, s0, n);
3210 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3211 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3213 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3214 si = bsi_start (seq_start_bb);
3216 t = fold_convert (type, s0);
3217 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3218 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3219 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3220 false, BSI_CONTINUE_LINKING);
3221 t = build_gimple_modify_stmt (fd->v, t);
3222 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3223 if (gimple_in_ssa_p (cfun))
3224 SSA_NAME_DEF_STMT (fd->v) = t;
3226 t = fold_convert (type, e0);
3227 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3228 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3229 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3230 false, BSI_CONTINUE_LINKING);
3232 /* The code controlling the sequential loop goes in CONT_BB,
3233 replacing the OMP_CONTINUE. */
3234 si = bsi_last (cont_bb);
3235 cont = bsi_stmt (si);
3236 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3237 v_main = TREE_OPERAND (cont, 1);
3238 v_back = TREE_OPERAND (cont, 0);
3240 t = build2 (PLUS_EXPR, type, v_main, fd->step);
3241 t = build_gimple_modify_stmt (v_back, t);
3242 bsi_insert_before (&si, t, BSI_SAME_STMT);
3243 if (gimple_in_ssa_p (cfun))
3244 SSA_NAME_DEF_STMT (v_back) = t;
3246 t = build2 (fd->cond_code, boolean_type_node, v_back, e);
3247 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3248 bsi_insert_before (&si, t, BSI_SAME_STMT);
3250 /* Remove OMP_CONTINUE. */
3251 bsi_remove (&si, true);
3253 /* Trip update code goes into TRIP_UPDATE_BB. */
3254 si = bsi_start (trip_update_bb);
3256 t = build_int_cst (type, 1);
3257 t = build2 (PLUS_EXPR, type, trip_main, t);
3258 t = build_gimple_modify_stmt (trip_back, t);
3259 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3260 if (gimple_in_ssa_p (cfun))
3261 SSA_NAME_DEF_STMT (trip_back) = t;
3263 /* Replace the OMP_RETURN with a barrier, or nothing. */
3264 si = bsi_last (exit_bb);
3265 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3266 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3267 false, BSI_SAME_STMT);
3268 bsi_remove (&si, true);
3270 /* Connect the new blocks. */
3271 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
3272 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3274 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3275 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
3277 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
3279 if (gimple_in_ssa_p (cfun))
3281 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
3282 remove arguments of the phi nodes in fin_bb. We need to create
3283 appropriate phi nodes in iter_part_bb instead. */
3284 se = single_pred_edge (fin_bb);
3285 re = single_succ_edge (trip_update_bb);
3286 ene = single_succ_edge (entry_bb);
3288 args = PENDING_STMT (re);
3289 PENDING_STMT (re) = NULL_TREE;
3290 for (phi = phi_nodes (fin_bb);
3291 phi && args;
3292 phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
3294 t = PHI_RESULT (phi);
3295 gcc_assert (t == TREE_PURPOSE (args));
3296 nphi = create_phi_node (t, iter_part_bb);
3297 SSA_NAME_DEF_STMT (t) = nphi;
3299 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
3300 /* A special case -- fd->v is not yet computed in iter_part_bb, so we
3301 need to use v_extra instead. */
3302 if (t == fd->v)
3303 t = v_extra;
3304 add_phi_arg (nphi, t, ene);
3305 add_phi_arg (nphi, TREE_VALUE (args), re);
3307 gcc_assert (!phi && !args);
3308 while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
3309 remove_phi_node (phi, NULL_TREE, false);
3311 /* Make phi node for trip. */
3312 phi = create_phi_node (trip_main, iter_part_bb);
3313 SSA_NAME_DEF_STMT (trip_main) = phi;
3314 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
3315 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
3318 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
3319 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
3320 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
3321 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3322 recompute_dominator (CDI_DOMINATORS, fin_bb));
3323 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
3324 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
3325 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3326 recompute_dominator (CDI_DOMINATORS, body_bb));
3330 /* Expand the OpenMP loop defined by REGION. */
3332 static void
3333 expand_omp_for (struct omp_region *region)
3335 struct omp_for_data fd;
3337 extract_omp_for_data (last_stmt (region->entry), &fd);
3338 region->sched_kind = fd.sched_kind;
3340 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
3341 && !fd.have_ordered
3342 && region->cont != NULL)
3344 if (fd.chunk_size == NULL)
3345 expand_omp_for_static_nochunk (region, &fd);
3346 else
3347 expand_omp_for_static_chunk (region, &fd);
3349 else
3351 int fn_index = fd.sched_kind + fd.have_ordered * 4;
3352 int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
3353 int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
3354 expand_omp_for_generic (region, &fd, start_ix, next_ix);
3357 update_ssa (TODO_update_ssa_only_virtuals);
3361 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
3363 v = GOMP_sections_start (n);
3364 L0:
3365 switch (v)
3367 case 0:
3368 goto L2;
3369 case 1:
3370 section 1;
3371 goto L1;
3372 case 2:
3373 ...
3374 case n:
3375 ...
3376 default:
3377 abort ();
3379 L1:
3380 v = GOMP_sections_next ();
3381 goto L0;
3382 L2:
3383 reduction;
3385 If this is a combined parallel sections construct, replace the call to
3386 GOMP_sections_start with a call to GOMP_sections_next. */
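/* For illustration: a directive with two sections,

     #pragma omp sections
     {
       #pragma omp section
         work1 ();
       #pragma omp section
         work2 ();
     }

   produces a switch with case 1 and case 2 for the section bodies, a
   case 0 that jumps to the exit when no work remains, and a default that
   traps.  WORK1 and WORK2 are placeholders.  */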
3388 static void
3389 expand_omp_sections (struct omp_region *region)
3391 tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
3392 unsigned i, casei, len;
3393 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
3394 block_stmt_iterator si;
3395 struct omp_region *inner;
3396 bool exit_reachable = region->cont != NULL;
3398 gcc_assert (exit_reachable == (region->exit != NULL));
3399 entry_bb = region->entry;
3400 l0_bb = single_succ (entry_bb);
3401 l1_bb = region->cont;
3402 l2_bb = region->exit;
3403 if (exit_reachable)
3405 gcc_assert (single_pred (l2_bb) == l0_bb);
3406 default_bb = create_empty_bb (l1_bb->prev_bb);
3407 l1 = tree_block_label (l1_bb);
3408 l2 = tree_block_label (l2_bb);
3410 else
3412 default_bb = create_empty_bb (l0_bb);
3413 l1 = NULL_TREE;
3414 l2 = tree_block_label (default_bb);
3417 /* We will build a switch() with enough cases for all the
3418 OMP_SECTION regions, a '0' case to handle the end of more work
3419 and a default case to abort if something goes wrong. */
3420 len = EDGE_COUNT (l0_bb->succs);
3421 label_vec = make_tree_vec (len + 1);
3423 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
3424 OMP_SECTIONS statement. */
3425 si = bsi_last (entry_bb);
3426 sections_stmt = bsi_stmt (si);
3427 gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
3428 vin = OMP_SECTIONS_CONTROL (sections_stmt);
3429 if (!is_combined_parallel (region))
3431 /* If we are not inside a combined parallel+sections region,
3432 call GOMP_sections_start. */
3433 t = build_int_cst (unsigned_type_node,
3434 exit_reachable ? len - 1 : len);
3435 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
3436 t = build_call_expr (u, 1, t);
3438 else
3440 /* Otherwise, call GOMP_sections_next. */
3441 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
3442 t = build_call_expr (u, 0);
3444 t = build_gimple_modify_stmt (vin, t);
3445 bsi_insert_after (&si, t, BSI_SAME_STMT);
3446 if (gimple_in_ssa_p (cfun))
3447 SSA_NAME_DEF_STMT (vin) = t;
3448 bsi_remove (&si, true);
3450 /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
3451 si = bsi_last (l0_bb);
3452 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
3453 if (exit_reachable)
3455 cont = last_stmt (l1_bb);
3456 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3457 vmain = TREE_OPERAND (cont, 1);
3458 vnext = TREE_OPERAND (cont, 0);
3460 else
3462 vmain = vin;
3463 vnext = NULL_TREE;
3466 t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
3467 bsi_insert_after (&si, t, BSI_SAME_STMT);
3468 bsi_remove (&si, true);
3470 i = 0;
3471 if (exit_reachable)
3473 t = build3 (CASE_LABEL_EXPR, void_type_node,
3474 build_int_cst (unsigned_type_node, 0), NULL, l2);
3475 TREE_VEC_ELT (label_vec, 0) = t;
3476 i++;
3479 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
3480 for (inner = region->inner, casei = 1;
3481 inner;
3482 inner = inner->next, i++, casei++)
3484 basic_block s_entry_bb, s_exit_bb;
3486 s_entry_bb = inner->entry;
3487 s_exit_bb = inner->exit;
3489 t = tree_block_label (s_entry_bb);
3490 u = build_int_cst (unsigned_type_node, casei);
3491 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
3492 TREE_VEC_ELT (label_vec, i) = u;
3494 si = bsi_last (s_entry_bb);
3495 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
3496 gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
3497 bsi_remove (&si, true);
3498 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
3500 if (s_exit_bb == NULL)
3501 continue;
3503 si = bsi_last (s_exit_bb);
3504 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3505 bsi_remove (&si, true);
3507 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
3510 /* Error handling code goes in DEFAULT_BB. */
3511 t = tree_block_label (default_bb);
3512 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
3513 TREE_VEC_ELT (label_vec, len) = u;
3514 make_edge (l0_bb, default_bb, 0);
3516 si = bsi_start (default_bb);
3517 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
3518 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3520 if (exit_reachable)
3522 /* Code to get the next section goes in L1_BB. */
3523 si = bsi_last (l1_bb);
3524 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
3526 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
3527 t = build_gimple_modify_stmt (vnext, t);
3528 bsi_insert_after (&si, t, BSI_SAME_STMT);
3529 if (gimple_in_ssa_p (cfun))
3530 SSA_NAME_DEF_STMT (vnext) = t;
3531 bsi_remove (&si, true);
3533 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
3535 /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
3536 si = bsi_last (l2_bb);
3537 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
3538 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
3539 else
3540 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
3541 t = build_call_expr (t, 0);
3542 bsi_insert_after (&si, t, BSI_SAME_STMT);
3543 bsi_remove (&si, true);
3546 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
3550 /* Expand code for an OpenMP single directive. We've already expanded
3551 much of the code; here we simply place the GOMP_barrier call. */
3553 static void
3554 expand_omp_single (struct omp_region *region)
3556 basic_block entry_bb, exit_bb;
3557 block_stmt_iterator si;
3558 bool need_barrier = false;
3560 entry_bb = region->entry;
3561 exit_bb = region->exit;
3563 si = bsi_last (entry_bb);
3564 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
3565 be removed. We need to ensure that the thread that entered the single
3566 does not exit before the data is copied out by the other threads. */
3567 if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
3568 OMP_CLAUSE_COPYPRIVATE))
3569 need_barrier = true;
3570 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
3571 bsi_remove (&si, true);
3572 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3574 si = bsi_last (exit_bb);
3575 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
3576 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3577 false, BSI_SAME_STMT);
3578 bsi_remove (&si, true);
3579 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3583 /* Generic expansion for OpenMP synchronization directives: master,
3584 ordered and critical. All we need to do here is remove the entry
3585 and exit markers for REGION. */
3587 static void
3588 expand_omp_synch (struct omp_region *region)
3590 basic_block entry_bb, exit_bb;
3591 block_stmt_iterator si;
3593 entry_bb = region->entry;
3594 exit_bb = region->exit;
3596 si = bsi_last (entry_bb);
3597 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
3598 || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
3599 || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
3600 || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
3601 bsi_remove (&si, true);
3602 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3604 if (exit_bb)
3606 si = bsi_last (exit_bb);
3607 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3608 bsi_remove (&si, true);
3609 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3613 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
3614 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
3615 size of the data type, and thus usable to find the index of the builtin
3616 decl. Returns false if the expression is not of the proper form. */
3618 static bool
3619 expand_omp_atomic_fetch_op (basic_block load_bb,
3620 tree addr, tree loaded_val,
3621 tree stored_val, int index)
3623 enum built_in_function base;
3624 tree decl, itype, call;
3625 enum insn_code *optab;
3626 tree rhs;
3627 basic_block store_bb = single_succ (load_bb);
3628 block_stmt_iterator bsi;
3629 tree stmt;
3631 /* We expect to find the following sequences:
3633 load_bb:
3634 OMP_ATOMIC_LOAD (tmp, mem)
3636 store_bb:
3637 val = tmp OP something; (or: something OP tmp)
3638 OMP_ATOMIC_STORE (val)
3640 ???FIXME: Allow a more flexible sequence.
3641 Perhaps use data flow to pick the statements.
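/* For example (illustration only): for

     #pragma omp atomic
     x += 1;

   with a 4-byte int X, the load/store pair above is replaced by a single
   call to __sync_fetch_and_add_4 (&x, 1).  */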
3645 bsi = bsi_after_labels (store_bb);
3646 stmt = bsi_stmt (bsi);
3647 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3648 return false;
3649 bsi_next (&bsi);
3650 if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
3651 return false;
3653 if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
3654 return false;
3656 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
3658 /* Check for one of the supported fetch-op operations. */
3659 switch (TREE_CODE (rhs))
3661 case PLUS_EXPR:
3662 case POINTER_PLUS_EXPR:
3663 base = BUILT_IN_FETCH_AND_ADD_N;
3664 optab = sync_add_optab;
3665 break;
3666 case MINUS_EXPR:
3667 base = BUILT_IN_FETCH_AND_SUB_N;
3668 optab = sync_add_optab;
3669 break;
3670 case BIT_AND_EXPR:
3671 base = BUILT_IN_FETCH_AND_AND_N;
3672 optab = sync_and_optab;
3673 break;
3674 case BIT_IOR_EXPR:
3675 base = BUILT_IN_FETCH_AND_OR_N;
3676 optab = sync_ior_optab;
3677 break;
3678 case BIT_XOR_EXPR:
3679 base = BUILT_IN_FETCH_AND_XOR_N;
3680 optab = sync_xor_optab;
3681 break;
3682 default:
3683 return false;
3685 /* Make sure the expression is of the proper form. */
3686 if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
3687 rhs = TREE_OPERAND (rhs, 1);
3688 else if (commutative_tree_code (TREE_CODE (rhs))
3689 && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
3690 rhs = TREE_OPERAND (rhs, 0);
3691 else
3692 return false;
3694 decl = built_in_decls[base + index + 1];
3695 itype = TREE_TYPE (TREE_TYPE (decl));
3697 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
3698 return false;
3700 bsi = bsi_last (load_bb);
3701 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3702 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
3703 force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
3704 bsi_remove (&bsi, true);
3706 bsi = bsi_last (store_bb);
3707 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3708 bsi_remove (&bsi, true);
3709 bsi = bsi_last (store_bb);
3710 bsi_remove (&bsi, true);
3712 if (gimple_in_ssa_p (cfun))
3713 update_ssa (TODO_update_ssa_no_phi);
3715 return true;
3718 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3720 oldval = *addr;
3721 repeat:
3722 newval = rhs; // with oldval replacing *addr in rhs
3723 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
3724 if (oldval != newval)
3725 goto repeat;
3727 INDEX is log2 of the size of the data type, and thus usable to find the
3728 index of the builtin decl. */
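/* For illustration: for

     #pragma omp atomic
     d *= 2.0;

   with a double D there is no fetch-and-multiply builtin, so the update is
   expanded as a __sync_val_compare_and_swap_8 loop, with the loaded and
   stored values view-converted to an 8-byte integer type for the compare
   and swap.  */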
3730 static bool
3731 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
3732 tree addr, tree loaded_val, tree stored_val,
3733 int index)
3735 tree loadedi, storedi, initial, new_stored, new_storedi, old_vali;
3736 tree type, itype, cmpxchg, iaddr;
3737 block_stmt_iterator bsi;
3738 basic_block loop_header = single_succ (load_bb);
3739 tree phi, x;
3740 edge e;
3742 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
3743 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3744 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
3746 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
3747 return false;
3749 /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
3750 bsi = bsi_last (load_bb);
3751 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3752 initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (addr),
3753 true, NULL_TREE, true, BSI_SAME_STMT);
3754 /* Move the value to the LOADED_VAL temporary. */
3755 if (gimple_in_ssa_p (cfun))
3757 gcc_assert (phi_nodes (loop_header) == NULL_TREE);
3758 phi = create_phi_node (loaded_val, loop_header);
3759 SSA_NAME_DEF_STMT (loaded_val) = phi;
3760 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
3761 initial);
3763 else
3764 bsi_insert_before (&bsi,
3765 build_gimple_modify_stmt (loaded_val, initial),
3766 BSI_SAME_STMT);
3767 bsi_remove (&bsi, true);
3769 bsi = bsi_last (store_bb);
3770 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3772 /* For floating-point values, we'll need to view-convert them to integers
3773 so that we can perform the atomic compare and swap. Simplify the
3774 following code by always setting up the "i"ntegral variables. */
3775 if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3777 loadedi = loaded_val;
3778 storedi = stored_val;
3779 iaddr = addr;
3781 else
3783 loadedi = force_gimple_operand_bsi (&bsi,
3784 build1 (VIEW_CONVERT_EXPR, itype,
3785 loaded_val), true,
3786 NULL_TREE, true, BSI_SAME_STMT);
3787 storedi =
3788 force_gimple_operand_bsi (&bsi,
3789 build1 (VIEW_CONVERT_EXPR, itype,
3790 stored_val), true, NULL_TREE, true,
3791 BSI_SAME_STMT);
3792 iaddr = fold_convert (build_pointer_type (itype), addr);
3795 /* Build the compare&swap statement. */
3796 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
3797 new_storedi = force_gimple_operand_bsi (&bsi,
3798 fold_convert (itype, new_storedi),
3799 true, NULL_TREE,
3800 true, BSI_SAME_STMT);
3801 if (storedi == stored_val)
3802 new_stored = new_storedi;
3803 else
3804 new_stored = force_gimple_operand_bsi (&bsi,
3805 build1 (VIEW_CONVERT_EXPR, type,
3806 new_storedi), true,
3807 NULL_TREE, true, BSI_SAME_STMT);
3809 if (gimple_in_ssa_p (cfun))
3810 old_vali = loadedi;
3811 else
3813 old_vali = create_tmp_var (itype, NULL);
3814 x = build_gimple_modify_stmt (old_vali, loadedi);
3815 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3817 x = build_gimple_modify_stmt (loaded_val, new_stored);
3818 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3821 /* Note that we always perform the comparison as an integer, even for
3822 floating point. This allows the atomic operation to properly
3823 succeed even with NaNs and -0.0. */
3824 x = build3 (COND_EXPR, void_type_node,
3825 build2 (NE_EXPR, boolean_type_node,
3826 new_storedi, old_vali), NULL_TREE, NULL_TREE);
3827 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3829 /* Update cfg. */
3830 e = single_succ_edge (store_bb);
3831 e->flags &= ~EDGE_FALLTHRU;
3832 e->flags |= EDGE_FALSE_VALUE;
3834 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
3836 /* Copy the new value to loaded_val (we already did that before the condition
3837 if we are not in SSA). */
3838 if (gimple_in_ssa_p (cfun))
3840 phi = phi_nodes (loop_header);
3841 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_stored);
3844 /* Remove OMP_ATOMIC_STORE. */
3845 bsi_remove (&bsi, true);
3847 if (gimple_in_ssa_p (cfun))
3848 update_ssa (TODO_update_ssa_no_phi);
3850 return true;
3853 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3855 GOMP_atomic_start ();
3856 *addr = rhs;
3857 GOMP_atomic_end ();
3859 The result is not globally atomic, but works so long as all parallel
3860 references are within #pragma omp atomic directives. According to
3861 responses received from omp@openmp.org, this appears to be within
3862 spec. That makes sense, since several other compilers handle this
3863 situation the same way.
3864 LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
3865 STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.
3867 We replace
3868 OMP_ATOMIC_LOAD (loaded_val, addr) with
3869 loaded_val = *addr;
3871 and replace
3872 OMP_ATOMIC_STORE (stored_val) with
3873 *addr = stored_val;
3876 static bool
3877 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
3878 tree addr, tree loaded_val, tree stored_val)
3880 block_stmt_iterator bsi;
3881 tree t;
3883 bsi = bsi_last (load_bb);
3884 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3886 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
3887 t = build_function_call_expr (t, 0);
3888 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3890 t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
3891 if (gimple_in_ssa_p (cfun))
3892 SSA_NAME_DEF_STMT (loaded_val) = t;
3893 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3894 bsi_remove (&bsi, true);
3896 bsi = bsi_last (store_bb);
3897 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3899 t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
3900 stored_val);
3901 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3903 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
3904 t = build_function_call_expr (t, 0);
3905 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3906 bsi_remove (&bsi, true);
3908 if (gimple_in_ssa_p (cfun))
3909 update_ssa (TODO_update_ssa_no_phi);
3910 return true;
3913 /* Expand an OMP_ATOMIC statement. We try to expand
3914 using expand_omp_atomic_fetch_op. If that fails, we try to
3915 call expand_omp_atomic_pipeline, and if that fails too, the
3916 ultimate fallback is wrapping the operation in a mutex
3917 (expand_omp_atomic_mutex). REGION is the atomic region built
3918 by build_omp_regions_1(). */
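/* As an illustration (not produced literally by this function), a
   statement such as

     #pragma omp atomic
     x += 3;

   on a suitably sized and aligned integer X is typically expanded by the
   fetch-op path into a single call like __sync_fetch_and_add (&x, 3),
   while the pipeline path emits a compare-and-swap retry loop and the
   mutex path brackets the update with GOMP_atomic_start/GOMP_atomic_end.  */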
3920 static void
3921 expand_omp_atomic (struct omp_region *region)
3923 basic_block load_bb = region->entry, store_bb = region->exit;
3924 tree load = last_stmt (load_bb), store = last_stmt (store_bb);
3925 tree loaded_val = TREE_OPERAND (load, 0);
3926 tree addr = TREE_OPERAND (load, 1);
3927 tree stored_val = TREE_OPERAND (store, 0);
3928 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3929 HOST_WIDE_INT index;
3931 /* Make sure the type is one of the supported sizes. */
3932 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
3933 index = exact_log2 (index);
3934 if (index >= 0 && index <= 4)
3936 unsigned int align = TYPE_ALIGN_UNIT (type);
3938 /* __sync builtins require strict data alignment. */
3939 if (exact_log2 (align) >= index)
3941 /* When possible, use specialized atomic update functions. */
3942 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3943 && store_bb == single_succ (load_bb))
3945 if (expand_omp_atomic_fetch_op (load_bb, addr,
3946 loaded_val, stored_val, index))
3947 return;
3950 /* If we don't have specialized __sync builtins, try to implement it
3951 as a compare and swap loop. */
3952 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
3953 loaded_val, stored_val, index))
3954 return;
3958 /* The ultimate fallback is wrapping the operation in a mutex. */
3959 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
3963 /* Expand the parallel region tree rooted at REGION. Expansion
3964 proceeds in depth-first order. Innermost regions are expanded
3965 first. This way, parallel regions that require a new function to
3966 be created (e.g., OMP_PARALLEL) can be expanded without having any
3967 internal dependencies in their body. */
3969 static void
3970 expand_omp (struct omp_region *region)
3972 while (region)
3974 /* First, determine whether this is a combined parallel+workshare
3975 region. */
3976 if (region->type == OMP_PARALLEL)
3977 determine_parallel_type (region);
3979 if (region->inner)
3980 expand_omp (region->inner);
3982 switch (region->type)
3984 case OMP_PARALLEL:
3985 expand_omp_parallel (region);
3986 break;
3988 case OMP_FOR:
3989 expand_omp_for (region);
3990 break;
3992 case OMP_SECTIONS:
3993 expand_omp_sections (region);
3994 break;
3996 case OMP_SECTION:
3997 /* Individual omp sections are handled together with their
3998 parent OMP_SECTIONS region. */
3999 break;
4001 case OMP_SINGLE:
4002 expand_omp_single (region);
4003 break;
4005 case OMP_MASTER:
4006 case OMP_ORDERED:
4007 case OMP_CRITICAL:
4008 expand_omp_synch (region);
4009 break;
4011 case OMP_ATOMIC_LOAD:
4012 expand_omp_atomic (region);
4013 break;
4016 default:
4017 gcc_unreachable ();
4020 region = region->next;
4025 /* Helper for build_omp_regions. Scan the dominator tree starting at
4026 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
4027 true, the function ends once a single tree is built (otherwise, the whole
4028 forest of OMP constructs may be built). */
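/* For instance, for

     #pragma omp parallel
       #pragma omp for
       for (...) ...

   this builds an OMP_PARALLEL region whose inner region is the OMP_FOR;
   the blocks holding the corresponding OMP_RETURN statements become the
   exit blocks of their regions, and the block holding the loop's
   OMP_CONTINUE becomes the OMP_FOR region's cont block.  */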
4030 static void
4031 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
4032 bool single_tree)
4034 block_stmt_iterator si;
4035 tree stmt;
4036 basic_block son;
4038 si = bsi_last (bb);
4039 if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
4041 struct omp_region *region;
4042 enum tree_code code;
4044 stmt = bsi_stmt (si);
4045 code = TREE_CODE (stmt);
4046 if (code == OMP_RETURN)
4048 /* STMT is the return point out of region PARENT. Mark it
4049 as the exit point and make PARENT the immediately
4050 enclosing region. */
4051 gcc_assert (parent);
4052 region = parent;
4053 region->exit = bb;
4054 parent = parent->outer;
4056 else if (code == OMP_ATOMIC_STORE)
4058 /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
4059 OMP_ATOMIC_LOAD. */
4060 gcc_assert (parent);
4061 gcc_assert (parent->type == OMP_ATOMIC_LOAD);
4062 region = parent;
4063 region->exit = bb;
4064 parent = parent->outer;
4067 else if (code == OMP_CONTINUE)
4069 gcc_assert (parent);
4070 parent->cont = bb;
4072 else if (code == OMP_SECTIONS_SWITCH)
4074 /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
4075 it. */ ;
4077 else
4079 /* Otherwise, this directive becomes the parent for a new
4080 region. */
4081 region = new_omp_region (bb, code, parent);
4082 parent = region;
4086 if (single_tree && !parent)
4087 return;
4089 for (son = first_dom_son (CDI_DOMINATORS, bb);
4090 son;
4091 son = next_dom_son (CDI_DOMINATORS, son))
4092 build_omp_regions_1 (son, parent, single_tree);
4095 /* Build the tree of OMP regions rooted at ROOT, storing it in
4096 root_omp_region. */
4098 static void
4099 build_omp_regions_root (basic_block root)
4101 gcc_assert (root_omp_region == NULL);
4102 build_omp_regions_1 (root, NULL, true);
4103 gcc_assert (root_omp_region != NULL);
4106 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
4108 void
4109 omp_expand_local (basic_block head)
4111 build_omp_regions_root (head);
4112 if (dump_file && (dump_flags & TDF_DETAILS))
4114 fprintf (dump_file, "\nOMP region tree\n\n");
4115 dump_omp_region (dump_file, root_omp_region, 0);
4116 fprintf (dump_file, "\n");
4119 remove_exit_barriers (root_omp_region);
4120 expand_omp (root_omp_region);
4122 free_omp_regions ();
4125 /* Scan the CFG and build a tree of OMP regions. The root of the
4126 tree is stored in root_omp_region. */
4128 static void
4129 build_omp_regions (void)
4131 gcc_assert (root_omp_region == NULL);
4132 calculate_dominance_info (CDI_DOMINATORS);
4133 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
4137 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
4139 static unsigned int
4140 execute_expand_omp (void)
4142 build_omp_regions ();
4144 if (!root_omp_region)
4145 return 0;
4147 if (dump_file)
4149 fprintf (dump_file, "\nOMP region tree\n\n");
4150 dump_omp_region (dump_file, root_omp_region, 0);
4151 fprintf (dump_file, "\n");
4154 remove_exit_barriers (root_omp_region);
4156 expand_omp (root_omp_region);
4158 cleanup_tree_cfg ();
4160 free_omp_regions ();
4162 return 0;
4165 /* OMP expansion in SSA form. For testing purposes only. */
4167 static bool
4168 gate_expand_omp_ssa (void)
4170 return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
4173 struct tree_opt_pass pass_expand_omp_ssa =
4175 "ompexpssa", /* name */
4176 gate_expand_omp_ssa, /* gate */
4177 execute_expand_omp, /* execute */
4178 NULL, /* sub */
4179 NULL, /* next */
4180 0, /* static_pass_number */
4181 0, /* tv_id */
4182 PROP_gimple_any, /* properties_required */
4183 PROP_gimple_lomp, /* properties_provided */
4184 0, /* properties_destroyed */
4185 0, /* todo_flags_start */
4186 TODO_dump_func, /* todo_flags_finish */
4187 0 /* letter */
4190 /* OMP expansion -- the default pass, run before creation of SSA form. */
4192 static bool
4193 gate_expand_omp (void)
4195 return ((!flag_openmp_ssa || !optimize)
4196 && flag_openmp != 0 && errorcount == 0);
4199 struct tree_opt_pass pass_expand_omp =
4201 "ompexp", /* name */
4202 gate_expand_omp, /* gate */
4203 execute_expand_omp, /* execute */
4204 NULL, /* sub */
4205 NULL, /* next */
4206 0, /* static_pass_number */
4207 0, /* tv_id */
4208 PROP_gimple_any, /* properties_required */
4209 PROP_gimple_lomp, /* properties_provided */
4210 0, /* properties_destroyed */
4211 0, /* todo_flags_start */
4212 TODO_dump_func, /* todo_flags_finish */
4213 0 /* letter */
4216 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
4218 /* Lower the OpenMP sections directive in *STMT_P. */
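/* In outline, the lowered result placed in *STMT_P is a BIND_EXPR whose
   body has roughly this shape:

     <firstprivate/private setup from the input clauses>
     OMP_SECTIONS [clauses, control variable .section]
     OMP_SECTIONS_SWITCH
     BIND_EXPR
       OMP_SECTION  <section-1 body>  OMP_RETURN
       ...
       OMP_SECTION  <section-N body>  <lastprivate handling>  OMP_RETURN
     OMP_CONTINUE (.section, .section)
     <reduction epilogue>
     <destructor list>
     OMP_RETURN [nowait]  */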
4220 static void
4221 lower_omp_sections (tree *stmt_p, omp_context *ctx)
4223 tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
4224 tree t, dlist;
4225 tree_stmt_iterator tsi;
4226 unsigned i, len;
4228 stmt = *stmt_p;
4230 push_gimplify_context ();
4232 dlist = NULL;
4233 ilist = NULL;
4234 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);
4236 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4237 for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
4238 continue;
4240 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4241 body = alloc_stmt_list ();
4242 for (i = 0; i < len; i++, tsi_next (&tsi))
4244 omp_context *sctx;
4245 tree sec_start, sec_end;
4247 sec_start = tsi_stmt (tsi);
4248 sctx = maybe_lookup_ctx (sec_start);
4249 gcc_assert (sctx);
4251 append_to_statement_list (sec_start, &body);
4253 lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
4254 append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
4255 OMP_SECTION_BODY (sec_start) = NULL;
4257 if (i == len - 1)
4259 tree l = alloc_stmt_list ();
4260 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
4261 &l, ctx);
4262 append_to_statement_list (l, &body);
4263 OMP_SECTION_LAST (sec_start) = 1;
4266 sec_end = make_node (OMP_RETURN);
4267 append_to_statement_list (sec_end, &body);
4270 block = make_node (BLOCK);
4271 bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);
4273 olist = NULL_TREE;
4274 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);
4276 pop_gimplify_context (NULL_TREE);
4277 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4279 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4280 TREE_SIDE_EFFECTS (new_stmt) = 1;
4282 new_body = alloc_stmt_list ();
4283 append_to_statement_list (ilist, &new_body);
4284 append_to_statement_list (stmt, &new_body);
4285 append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
4286 append_to_statement_list (bind, &new_body);
4288 control = create_tmp_var (unsigned_type_node, ".section");
4289 t = build2 (OMP_CONTINUE, void_type_node, control, control);
4290 OMP_SECTIONS_CONTROL (stmt) = control;
4291 append_to_statement_list (t, &new_body);
4293 append_to_statement_list (olist, &new_body);
4294 append_to_statement_list (dlist, &new_body);
4296 maybe_catch_exception (&new_body);
4298 t = make_node (OMP_RETURN);
4299 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
4300 OMP_CLAUSE_NOWAIT);
4301 append_to_statement_list (t, &new_body);
4303 BIND_EXPR_BODY (new_stmt) = new_body;
4304 OMP_SECTIONS_BODY (stmt) = NULL;
4306 *stmt_p = new_stmt;
4310 /* A subroutine of lower_omp_single. Expand the simple form of
4311 an OMP_SINGLE, without a copyprivate clause:
4313 if (GOMP_single_start ())
4314 BODY;
4315 [ GOMP_barrier (); ] -> unless 'nowait' is present.
4317 FIXME. It may be better to delay expanding the logic of this until
4318 pass_expand_omp. The expanded logic may make the job more difficult
4319 for a synchronization analysis pass. */
4321 static void
4322 lower_omp_single_simple (tree single_stmt, tree *pre_p)
4324 tree t;
4326 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
4327 t = build3 (COND_EXPR, void_type_node, t,
4328 OMP_SINGLE_BODY (single_stmt), NULL);
4329 gimplify_and_add (t, pre_p);
4333 /* A subroutine of lower_omp_single. Expand the form of an OMP_SINGLE
4334 that has a copyprivate clause:
4336 #pragma omp single copyprivate (a, b, c)
4338 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
4341 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
4343 BODY;
4344 copyout.a = a;
4345 copyout.b = b;
4346 copyout.c = c;
4347 GOMP_single_copy_end (&copyout);
4349 else
4351 a = copyout_p->a;
4352 b = copyout_p->b;
4353 c = copyout_p->c;
4355 GOMP_barrier ();
4358 FIXME. It may be better to delay expanding the logic of this until
4359 pass_expand_omp. The expanded logic may make the job more difficult
4360 for a synchronization analysis pass. */
4362 static void
4363 lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
4365 tree ptr_type, t, l0, l1, l2, copyin_seq;
4367 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
4369 ptr_type = build_pointer_type (ctx->record_type);
4370 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
4372 l0 = create_artificial_label ();
4373 l1 = create_artificial_label ();
4374 l2 = create_artificial_label ();
4376 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
4377 t = fold_convert (ptr_type, t);
4378 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4379 gimplify_and_add (t, pre_p);
4381 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
4382 build_int_cst (ptr_type, 0));
4383 t = build3 (COND_EXPR, void_type_node, t,
4384 build_and_jump (&l0), build_and_jump (&l1));
4385 gimplify_and_add (t, pre_p);
4387 t = build1 (LABEL_EXPR, void_type_node, l0);
4388 gimplify_and_add (t, pre_p);
4390 append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);
4392 copyin_seq = NULL;
4393 lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
4394 &copyin_seq, ctx);
4396 t = build_fold_addr_expr (ctx->sender_decl);
4397 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
4398 gimplify_and_add (t, pre_p);
4400 t = build_and_jump (&l2);
4401 gimplify_and_add (t, pre_p);
4403 t = build1 (LABEL_EXPR, void_type_node, l1);
4404 gimplify_and_add (t, pre_p);
4406 append_to_statement_list (copyin_seq, pre_p);
4408 t = build1 (LABEL_EXPR, void_type_node, l2);
4409 gimplify_and_add (t, pre_p);
4413 /* Expand code for an OpenMP single directive. */
4415 static void
4416 lower_omp_single (tree *stmt_p, omp_context *ctx)
4418 tree t, bind, block, single_stmt = *stmt_p, dlist;
4420 push_gimplify_context ();
4422 block = make_node (BLOCK);
4423 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4424 TREE_SIDE_EFFECTS (bind) = 1;
4426 lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
4427 &BIND_EXPR_BODY (bind), &dlist, ctx);
4428 lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);
4430 append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));
4432 if (ctx->record_type)
4433 lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
4434 else
4435 lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));
4437 OMP_SINGLE_BODY (single_stmt) = NULL;
4439 append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));
4441 maybe_catch_exception (&BIND_EXPR_BODY (bind));
4443 t = make_node (OMP_RETURN);
4444 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
4445 OMP_CLAUSE_NOWAIT);
4446 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4448 pop_gimplify_context (bind);
4450 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4451 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4455 /* Expand code for an OpenMP master directive. */
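/* The lowered form is, approximately:

     if (omp_get_thread_num () != 0)
       goto <skip>;
     <master body>;
   <skip>:
     OMP_RETURN (nowait)  */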
4457 static void
4458 lower_omp_master (tree *stmt_p, omp_context *ctx)
4460 tree bind, block, stmt = *stmt_p, lab = NULL, x;
4462 push_gimplify_context ();
4464 block = make_node (BLOCK);
4465 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4466 TREE_SIDE_EFFECTS (bind) = 1;
4468 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4470 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4471 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
4472 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
4473 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4475 lower_omp (&OMP_MASTER_BODY (stmt), ctx);
4476 maybe_catch_exception (&OMP_MASTER_BODY (stmt));
4477 append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
4478 OMP_MASTER_BODY (stmt) = NULL;
4480 x = build1 (LABEL_EXPR, void_type_node, lab);
4481 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4483 x = make_node (OMP_RETURN);
4484 OMP_RETURN_NOWAIT (x) = 1;
4485 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4487 pop_gimplify_context (bind);
4489 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4490 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4494 /* Expand code for an OpenMP ordered directive. */
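/* The lowered form is, approximately:

     GOMP_ordered_start ();
     <ordered body>;
     GOMP_ordered_end ();
     OMP_RETURN (nowait)  */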
4496 static void
4497 lower_omp_ordered (tree *stmt_p, omp_context *ctx)
4499 tree bind, block, stmt = *stmt_p, x;
4501 push_gimplify_context ();
4503 block = make_node (BLOCK);
4504 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4505 TREE_SIDE_EFFECTS (bind) = 1;
4507 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4509 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
4510 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4512 lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
4513 maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
4514 append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
4515 OMP_ORDERED_BODY (stmt) = NULL;
4517 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
4518 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4520 x = make_node (OMP_RETURN);
4521 OMP_RETURN_NOWAIT (x) = 1;
4522 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4524 pop_gimplify_context (bind);
4526 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4527 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4531 /* Gimplify an OMP_CRITICAL statement. This is a relatively simple
4532 substitution of a couple of function calls. But in the NAMED case,
4533 requires that languages coordinate a symbol name. It is therefore
4534 best put here in common code. */
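/* The lowered form brackets the body with runtime calls.  Unnamed:

     GOMP_critical_start ();  <body>;  GOMP_critical_end ();

   Named (e.g. "#pragma omp critical (foo)"), using a mutex shared across
   translation units through a common symbol:

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     <body>;
     GOMP_critical_name_end (&.gomp_critical_user_foo);  */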
4536 static GTY((param1_is (tree), param2_is (tree)))
4537 splay_tree critical_name_mutexes;
4539 static void
4540 lower_omp_critical (tree *stmt_p, omp_context *ctx)
4542 tree bind, block, stmt = *stmt_p;
4543 tree t, lock, unlock, name;
4545 name = OMP_CRITICAL_NAME (stmt);
4546 if (name)
4548 tree decl;
4549 splay_tree_node n;
4551 if (!critical_name_mutexes)
4552 critical_name_mutexes
4553 = splay_tree_new_ggc (splay_tree_compare_pointers);
4555 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
4556 if (n == NULL)
4558 char *new_str;
4560 decl = create_tmp_var_raw (ptr_type_node, NULL);
4562 new_str = ACONCAT ((".gomp_critical_user_",
4563 IDENTIFIER_POINTER (name), NULL));
4564 DECL_NAME (decl) = get_identifier (new_str);
4565 TREE_PUBLIC (decl) = 1;
4566 TREE_STATIC (decl) = 1;
4567 DECL_COMMON (decl) = 1;
4568 DECL_ARTIFICIAL (decl) = 1;
4569 DECL_IGNORED_P (decl) = 1;
4570 varpool_finalize_decl (decl);
4572 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
4573 (splay_tree_value) decl);
4575 else
4576 decl = (tree) n->value;
4578 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
4579 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
4581 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
4582 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
4584 else
4586 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
4587 lock = build_call_expr (lock, 0);
4589 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
4590 unlock = build_call_expr (unlock, 0);
4593 push_gimplify_context ();
4595 block = make_node (BLOCK);
4596 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4597 TREE_SIDE_EFFECTS (bind) = 1;
4599 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4601 gimplify_and_add (lock, &BIND_EXPR_BODY (bind));
4603 lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
4604 maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
4605 append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
4606 OMP_CRITICAL_BODY (stmt) = NULL;
4608 gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
4610 t = make_node (OMP_RETURN);
4611 OMP_RETURN_NOWAIT (t) = 1;
4612 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4614 pop_gimplify_context (bind);
4615 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4616 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4620 /* A subroutine of lower_omp_for. Generate code to emit the predicate
4621 for a lastprivate clause. Given a loop control predicate of (V
4622 cond N2), we gate the clause on (!(V cond N2)). The lowered form
4623 is appended to *DLIST, iterator initialization is appended to
4624 *BODY_P. */
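/* For example, with "#pragma omp for lastprivate (x)" over
   "for (i = 0; i < n; i++)", the loop predicate (i < n) is negated to
   (i >= n); because the step is 1, this is further strengthened to the
   equality test (i == n), which VRP can often fold away.  */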
4626 static void
4627 lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
4628 tree *dlist, struct omp_context *ctx)
4630 tree clauses, cond, stmts, vinit, t;
4631 enum tree_code cond_code;
4633 cond_code = fd->cond_code;
4634 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
4636 /* When possible, use a strict equality expression. This can let VRP
4637 type optimizations deduce the value and remove a copy. */
4638 if (host_integerp (fd->step, 0))
4640 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
4641 if (step == 1 || step == -1)
4642 cond_code = EQ_EXPR;
4645 cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
4647 clauses = OMP_FOR_CLAUSES (fd->for_stmt);
4648 stmts = NULL;
4649 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
4650 if (stmts != NULL)
4652 append_to_statement_list (stmts, dlist);
4654 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
4655 vinit = fd->n1;
4656 if (cond_code == EQ_EXPR
4657 && host_integerp (fd->n2, 0)
4658 && ! integer_zerop (fd->n2))
4659 vinit = build_int_cst (TREE_TYPE (fd->v), 0);
4661 /* Initialize the iterator variable, so that threads that don't execute
4662 any iterations don't execute the lastprivate clauses by accident. */
4663 t = build_gimple_modify_stmt (fd->v, vinit);
4664 gimplify_and_add (t, body_p);
4669 /* Lower code for an OpenMP loop directive. */
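/* The replacement statement is a BIND_EXPR whose body is assembled
   roughly as:

     <lowered pre-body>
     <firstprivate/private setup from the input clauses>
     <iteration variable initialization for the lastprivate guard>
     OMP_FOR [lowered header, body emptied]
     <lowered loop body>
     OMP_CONTINUE (v, v)
     <reduction epilogue>
     <lastprivate and destructor list>
     OMP_RETURN [nowait]  */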
4671 static void
4672 lower_omp_for (tree *stmt_p, omp_context *ctx)
4674 tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
4675 struct omp_for_data fd;
4677 stmt = *stmt_p;
4679 push_gimplify_context ();
4681 lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
4682 lower_omp (&OMP_FOR_BODY (stmt), ctx);
4684 /* Move the declarations of temporaries in the loop body out of it
4685 before we make the body go away. */
4686 if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
4687 record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);
4689 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4690 TREE_SIDE_EFFECTS (new_stmt) = 1;
4691 body_p = &BIND_EXPR_BODY (new_stmt);
4693 /* The pre-body and input clauses go before the lowered OMP_FOR. */
4694 ilist = NULL;
4695 dlist = NULL;
4696 append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
4697 lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
4699 /* Lower the header expressions. At this point, we can assume that
4700 the header is of the form:
4702 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
4704 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
4705 using the .omp_data_s mapping, if needed. */
4706 rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
4707 if (!is_gimple_min_invariant (*rhs_p))
4708 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4710 rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
4711 if (!is_gimple_min_invariant (*rhs_p))
4712 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4714 rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
4715 if (!is_gimple_min_invariant (*rhs_p))
4716 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4718 /* Once lowered, extract the bounds and clauses. */
4719 extract_omp_for_data (stmt, &fd);
4721 lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
4723 append_to_statement_list (stmt, body_p);
4725 append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
4727 t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
4728 append_to_statement_list (t, body_p);
4730 /* After the loop, add exit clauses. */
4731 lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
4732 append_to_statement_list (dlist, body_p);
4734 maybe_catch_exception (body_p);
4736 /* Region exit marker goes at the end of the loop body. */
4737 t = make_node (OMP_RETURN);
4738 OMP_RETURN_NOWAIT (t) = fd.have_nowait;
4739 append_to_statement_list (t, body_p);
4741 pop_gimplify_context (NULL_TREE);
4742 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4744 OMP_FOR_BODY (stmt) = NULL_TREE;
4745 OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
4746 *stmt_p = new_stmt;
4749 /* Callback for walk_stmts. Check if *TP only contains OMP_FOR
4750 or OMP_SECTIONS. */
4752 static tree
4753 check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
4755 struct walk_stmt_info *wi = data;
4756 int *info = wi->info;
4758 *walk_subtrees = 0;
4759 switch (TREE_CODE (*tp))
4761 case OMP_FOR:
4762 case OMP_SECTIONS:
4763 *info = *info == 0 ? 1 : -1;
4764 break;
4765 default:
4766 *info = -1;
4767 break;
4769 return NULL;
4772 /* Lower the OpenMP parallel directive in *STMT_P. CTX holds context
4773 information for the directive. */
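/* The directive is rewritten into an outer BIND_EXPR of roughly this
   shape (the body of the OMP_PARALLEL later becomes the child function):

     <sender setup: .omp_data_o fields filled from shared/firstprivate values>
     OMP_PARALLEL [clauses, data arg .omp_data_o]
       .omp_data_i = &.omp_data_o;    <- only when a record type exists
       <firstprivate/private setup>
       <parallel body>
       <reduction epilogue>
       OMP_RETURN
     <copy-back out of .omp_data_o after the region, if any>  */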
4775 static void
4776 lower_omp_parallel (tree *stmt_p, omp_context *ctx)
4778 tree clauses, par_bind, par_body, new_body, bind;
4779 tree olist, ilist, par_olist, par_ilist;
4780 tree stmt, child_fn, t;
4782 stmt = *stmt_p;
4784 clauses = OMP_PARALLEL_CLAUSES (stmt);
4785 par_bind = OMP_PARALLEL_BODY (stmt);
4786 par_body = BIND_EXPR_BODY (par_bind);
4787 child_fn = ctx->cb.dst_fn;
4788 if (!OMP_PARALLEL_COMBINED (stmt))
4790 struct walk_stmt_info wi;
4791 int ws_num = 0;
4793 memset (&wi, 0, sizeof (wi));
4794 wi.callback = check_combined_parallel;
4795 wi.info = &ws_num;
4796 wi.val_only = true;
4797 walk_stmts (&wi, &par_bind);
4798 if (ws_num == 1)
4799 OMP_PARALLEL_COMBINED (stmt) = 1;
4802 push_gimplify_context ();
4804 par_olist = NULL_TREE;
4805 par_ilist = NULL_TREE;
4806 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
4807 lower_omp (&par_body, ctx);
4808 lower_reduction_clauses (clauses, &par_olist, ctx);
4810 /* Declare all the variables created by mapping and the variables
4811 declared in the scope of the parallel body. */
4812 record_vars_into (ctx->block_vars, child_fn);
4813 record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);
4815 if (ctx->record_type)
4817 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
4818 OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
4821 olist = NULL_TREE;
4822 ilist = NULL_TREE;
4823 lower_send_clauses (clauses, &ilist, &olist, ctx);
4824 lower_send_shared_vars (&ilist, &olist, ctx);
4826 /* Once all the expansions are done, sequence all the different
4827 fragments inside OMP_PARALLEL_BODY. */
4828 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4829 append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
4831 new_body = alloc_stmt_list ();
4833 if (ctx->record_type)
4835 t = build_fold_addr_expr (ctx->sender_decl);
4836 /* fixup_child_record_type might have changed receiver_decl's type. */
4837 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
4838 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4839 append_to_statement_list (t, &new_body);
4842 append_to_statement_list (par_ilist, &new_body);
4843 append_to_statement_list (par_body, &new_body);
4844 append_to_statement_list (par_olist, &new_body);
4845 maybe_catch_exception (&new_body);
4846 t = make_node (OMP_RETURN);
4847 append_to_statement_list (t, &new_body);
4848 OMP_PARALLEL_BODY (stmt) = new_body;
4850 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4851 append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
4853 *stmt_p = bind;
4855 pop_gimplify_context (NULL_TREE);
4859 /* Pass *TP back through the gimplifier within the context determined by WI.
4860 This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
4861 flags on ADDR_EXPR. */
4863 static void
4864 lower_regimplify (tree *tp, struct walk_stmt_info *wi)
4866 enum gimplify_status gs;
4867 tree pre = NULL;
4869 if (wi->is_lhs)
4870 gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
4871 else if (wi->val_only)
4872 gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
4873 else
4874 gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
4875 gcc_assert (gs == GS_ALL_DONE);
4877 if (pre)
4878 tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
4881 /* Copy EXP into a temporary. Insert the initialization statement before TSI. */
4883 static tree
4884 init_tmp_var (tree exp, tree_stmt_iterator *tsi)
4886 tree t, stmt;
4888 t = create_tmp_var (TREE_TYPE (exp), NULL);
4889 DECL_GIMPLE_REG_P (t) = 1;
4890 stmt = build_gimple_modify_stmt (t, exp);
4891 SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
4892 tsi_link_before (tsi, stmt, TSI_SAME_STMT);
4894 return t;
4897 /* Similarly, but copy from the temporary and insert the statement
4898 after the iterator. */
4900 static tree
4901 save_tmp_var (tree exp, tree_stmt_iterator *tsi)
4903 tree t, stmt;
4905 t = create_tmp_var (TREE_TYPE (exp), NULL);
4906 DECL_GIMPLE_REG_P (t) = 1;
4907 stmt = build_gimple_modify_stmt (exp, t);
4908 SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
4909 tsi_link_after (tsi, stmt, TSI_SAME_STMT);
4911 return t;
4914 /* Callback for walk_stmts. Lower the OpenMP directive pointed to by TP. */
4916 static tree
4917 lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
4919 struct walk_stmt_info *wi = data;
4920 omp_context *ctx = wi->info;
4921 tree t = *tp;
4923 /* If we have issued syntax errors, avoid doing any heavy lifting.
4924 Just replace the OpenMP directives with a NOP to avoid
4925 confusing RTL expansion. */
4926 if (errorcount && OMP_DIRECTIVE_P (*tp))
4928 *tp = build_empty_stmt ();
4929 return NULL_TREE;
4932 *walk_subtrees = 0;
4933 switch (TREE_CODE (*tp))
4935 case OMP_PARALLEL:
4936 ctx = maybe_lookup_ctx (t);
4937 lower_omp_parallel (tp, ctx);
4938 break;
4940 case OMP_FOR:
4941 ctx = maybe_lookup_ctx (t);
4942 gcc_assert (ctx);
4943 lower_omp_for (tp, ctx);
4944 break;
4946 case OMP_SECTIONS:
4947 ctx = maybe_lookup_ctx (t);
4948 gcc_assert (ctx);
4949 lower_omp_sections (tp, ctx);
4950 break;
4952 case OMP_SINGLE:
4953 ctx = maybe_lookup_ctx (t);
4954 gcc_assert (ctx);
4955 lower_omp_single (tp, ctx);
4956 break;
4958 case OMP_MASTER:
4959 ctx = maybe_lookup_ctx (t);
4960 gcc_assert (ctx);
4961 lower_omp_master (tp, ctx);
4962 break;
4964 case OMP_ORDERED:
4965 ctx = maybe_lookup_ctx (t);
4966 gcc_assert (ctx);
4967 lower_omp_ordered (tp, ctx);
4968 break;
4970 case OMP_CRITICAL:
4971 ctx = maybe_lookup_ctx (t);
4972 gcc_assert (ctx);
4973 lower_omp_critical (tp, ctx);
4974 break;
4976 case VAR_DECL:
4977 if (ctx && DECL_HAS_VALUE_EXPR_P (t))
4979 lower_regimplify (&t, wi);
4980 if (wi->val_only)
4982 if (wi->is_lhs)
4983 t = save_tmp_var (t, &wi->tsi);
4984 else
4985 t = init_tmp_var (t, &wi->tsi);
4987 *tp = t;
4989 break;
4991 case ADDR_EXPR:
4992 if (ctx)
4993 lower_regimplify (tp, wi);
4994 break;
4996 case ARRAY_REF:
4997 case ARRAY_RANGE_REF:
4998 case REALPART_EXPR:
4999 case IMAGPART_EXPR:
5000 case COMPONENT_REF:
5001 case VIEW_CONVERT_EXPR:
5002 if (ctx)
5003 lower_regimplify (tp, wi);
5004 break;
5006 case INDIRECT_REF:
5007 if (ctx)
5009 wi->is_lhs = false;
5010 wi->val_only = true;
5011 lower_regimplify (&TREE_OPERAND (t, 0), wi);
5013 break;
5015 default:
5016 if (!TYPE_P (t) && !DECL_P (t))
5017 *walk_subtrees = 1;
5018 break;
5021 return NULL_TREE;
5024 static void
5025 lower_omp (tree *stmt_p, omp_context *ctx)
5027 struct walk_stmt_info wi;
5029 memset (&wi, 0, sizeof (wi));
5030 wi.callback = lower_omp_1;
5031 wi.info = ctx;
5032 wi.val_only = true;
5033 wi.want_locations = true;
5035 walk_stmts (&wi, stmt_p);
5038 /* Main entry point. */
5040 static unsigned int
5041 execute_lower_omp (void)
5043 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
5044 delete_omp_context);
5046 scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5047 gcc_assert (parallel_nesting_level == 0);
5049 if (all_contexts->root)
5050 lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5052 if (all_contexts)
5054 splay_tree_delete (all_contexts);
5055 all_contexts = NULL;
5057 return 0;
5060 static bool
5061 gate_lower_omp (void)
5063 return flag_openmp != 0;
5066 struct tree_opt_pass pass_lower_omp =
5068 "omplower", /* name */
5069 gate_lower_omp, /* gate */
5070 execute_lower_omp, /* execute */
5071 NULL, /* sub */
5072 NULL, /* next */
5073 0, /* static_pass_number */
5074 0, /* tv_id */
5075 PROP_gimple_any, /* properties_required */
5076 PROP_gimple_lomp, /* properties_provided */
5077 0, /* properties_destroyed */
5078 0, /* todo_flags_start */
5079 TODO_dump_func, /* todo_flags_finish */
5080 0 /* letter */
5083 /* The following is a utility to diagnose OpenMP structured block violations.
5084 It is not part of the "omplower" pass, as that's invoked too late. It
5085 should be invoked by the respective front ends after gimplification. */
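/* For example, the following is diagnosed as an invalid entry, since the
   goto would jump into the middle of the structured block:

     goto l;
     #pragma omp single
       { l:; }

   A goto (or return) leaving such a block is likewise diagnosed as an
   invalid exit.  */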
5087 static splay_tree all_labels;
5089 /* Check for mismatched contexts and generate an error if needed. Return
5090 true if an error is detected. */
5092 static bool
5093 diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
5095 bool exit_p = true;
5097 if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
5098 return false;
5100 /* Try to avoid confusing the user by producing an error message
5101 with the correct "exit" or "enter" verbiage. We prefer "exit"
5102 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
5103 if (branch_ctx == NULL)
5104 exit_p = false;
5105 else
5107 while (label_ctx)
5109 if (TREE_VALUE (label_ctx) == branch_ctx)
5111 exit_p = false;
5112 break;
5114 label_ctx = TREE_CHAIN (label_ctx);
5118 if (exit_p)
5119 error ("invalid exit from OpenMP structured block");
5120 else
5121 error ("invalid entry to OpenMP structured block");
5123 *stmt_p = build_empty_stmt ();
5124 return true;
5127 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
5128 where in the tree each label is found. */
5130 static tree
5131 diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
5133 struct walk_stmt_info *wi = data;
5134 tree context = (tree) wi->info;
5135 tree inner_context;
5136 tree t = *tp;
5138 *walk_subtrees = 0;
5139 switch (TREE_CODE (t))
5141 case OMP_PARALLEL:
5142 case OMP_SECTIONS:
5143 case OMP_SINGLE:
5144 walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
5145 /* FALLTHRU */
5146 case OMP_SECTION:
5147 case OMP_MASTER:
5148 case OMP_ORDERED:
5149 case OMP_CRITICAL:
5150 /* The minimal context here is just a tree of statements. */
5151 inner_context = tree_cons (NULL, t, context);
5152 wi->info = inner_context;
5153 walk_stmts (wi, &OMP_BODY (t));
5154 wi->info = context;
5155 break;
5157 case OMP_FOR:
5158 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
5159 inner_context = tree_cons (NULL, t, context);
5160 wi->info = inner_context;
5161 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
5162 walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
5163 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
5164 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5165 walk_stmts (wi, &OMP_FOR_BODY (t));
5166 wi->info = context;
5167 break;
5169 case LABEL_EXPR:
5170 splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
5171 (splay_tree_value) context);
5172 break;
5174 default:
5175 break;
5178 return NULL_TREE;
5181 /* Pass 2: Check each branch and see if its context differs from that of
5182 the destination label's context. */
5184 static tree
5185 diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
5187 struct walk_stmt_info *wi = data;
5188 tree context = (tree) wi->info;
5189 splay_tree_node n;
5190 tree t = *tp;
5192 *walk_subtrees = 0;
5193 switch (TREE_CODE (t))
5195 case OMP_PARALLEL:
5196 case OMP_SECTIONS:
5197 case OMP_SINGLE:
5198 walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
5199 /* FALLTHRU */
5200 case OMP_SECTION:
5201 case OMP_MASTER:
5202 case OMP_ORDERED:
5203 case OMP_CRITICAL:
5204 wi->info = t;
5205 walk_stmts (wi, &OMP_BODY (t));
5206 wi->info = context;
5207 break;
5209 case OMP_FOR:
5210 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
5211 wi->info = t;
5212 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
5213 walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
5214 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
5215 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5216 walk_stmts (wi, &OMP_FOR_BODY (t));
5217 wi->info = context;
5218 break;
5220 case GOTO_EXPR:
5222 tree lab = GOTO_DESTINATION (t);
5223 if (TREE_CODE (lab) != LABEL_DECL)
5224 break;
5226 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5227 diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
5229 break;
5231 case SWITCH_EXPR:
5233 tree vec = SWITCH_LABELS (t);
5234 int i, len = TREE_VEC_LENGTH (vec);
5235 for (i = 0; i < len; ++i)
5237 tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
5238 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5239 if (diagnose_sb_0 (tp, context, (tree) n->value))
5240 break;
5243 break;
5245 case RETURN_EXPR:
5246 diagnose_sb_0 (tp, context, NULL_TREE);
5247 break;
5249 default:
5250 break;
5253 return NULL_TREE;
5256 void
5257 diagnose_omp_structured_block_errors (tree fndecl)
5259 tree save_current = current_function_decl;
5260 struct walk_stmt_info wi;
5262 current_function_decl = fndecl;
5264 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
5266 memset (&wi, 0, sizeof (wi));
5267 wi.callback = diagnose_sb_1;
5268 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5270 memset (&wi, 0, sizeof (wi));
5271 wi.callback = diagnose_sb_2;
5272 wi.want_locations = true;
5273 wi.want_return_expr = true;
5274 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5276 splay_tree_delete (all_labels);
5277 all_labels = NULL;
5279 current_function_decl = save_current;
5282 #include "gt-omp-low.h"