gcc/config/nvptx/nvptx.c
/* Target code for NVPTX.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   Contributed by Bernd Schmidt <bernds@codesourcery.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include <sstream>
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic.h"
#include "alias.h"
#include "insn-flags.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tm-preds.h"
#include "tm-constrs.h"
#include "langhooks.h"
#include "dbxout.h"
#include "cfgrtl.h"
#include "gimple.h"
#include "stor-layout.h"
#include "builtins.h"
#include "omp-low.h"
#include "gomp-constants.h"
#include "dumpfile.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "stringpool.h"
#include "tree-ssa-operands.h"
#include "tree-ssanames.h"
#include "gimplify.h"
#include "tree-phinodes.h"
#include "cfgloop.h"
#include "fold-const.h"

/* This file should be included last.  */
#include "target-def.h"
/* The kind of shuffle instruction.  */
enum nvptx_shuffle_kind
{
  SHUFFLE_UP,
  SHUFFLE_DOWN,
  SHUFFLE_BFLY,
  SHUFFLE_IDX,
  SHUFFLE_MAX
};
/* The various PTX memory areas an object might reside in.  */
enum nvptx_data_area
{
  DATA_AREA_GENERIC,
  DATA_AREA_GLOBAL,
  DATA_AREA_SHARED,
  DATA_AREA_LOCAL,
  DATA_AREA_CONST,
  DATA_AREA_PARAM,
  DATA_AREA_MAX
};

/* We record the data area in the target symbol flags.  */
#define SYMBOL_DATA_AREA(SYM) \
  (nvptx_data_area)((SYMBOL_REF_FLAGS (SYM) >> SYMBOL_FLAG_MACH_DEP_SHIFT) \
		    & 7)
#define SET_SYMBOL_DATA_AREA(SYM,AREA) \
  (SYMBOL_REF_FLAGS (SYM) |= (AREA) << SYMBOL_FLAG_MACH_DEP_SHIFT)
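/* For instance, SET_SYMBOL_DATA_AREA (sym, DATA_AREA_SHARED) records the
   area in the three machine-dependent flag bits above
   SYMBOL_FLAG_MACH_DEP_SHIFT, and SYMBOL_DATA_AREA (sym) recovers it when
   the symbol's address is later printed.  (Illustrative usage only; see
   nvptx_encode_section_info below for the real caller.)  */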
/* Record the function decls we've written, and the libfuncs and function
   decls corresponding to them.  */
static std::stringstream func_decls;

struct declared_libfunc_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x) { return htab_hash_pointer (x); }
  static bool equal (rtx a, rtx b) { return a == b; }
};

static GTY((cache))
  hash_table<declared_libfunc_hasher> *declared_libfuncs_htab;

struct tree_hasher : ggc_cache_ptr_hash<tree_node>
{
  static hashval_t hash (tree t) { return htab_hash_pointer (t); }
  static bool equal (tree a, tree b) { return a == b; }
};

static GTY((cache)) hash_table<tree_hasher> *declared_fndecls_htab;
static GTY((cache)) hash_table<tree_hasher> *needed_fndecls_htab;

/* Buffer needed to broadcast across workers.  This is used for both
   worker-neutering and worker broadcasting.  It is shared by all
   functions emitted.  The buffer is placed in shared memory.  It'd be
   nice if PTX supported common blocks, because then this could be
   shared across TUs (taking the largest size).  */
static unsigned worker_bcast_size;
static unsigned worker_bcast_align;
#define worker_bcast_name "__worker_bcast"
static GTY(()) rtx worker_bcast_sym;

/* Buffer needed for worker reductions.  This has to be distinct from
   the worker broadcast array, as both may be live concurrently.  */
static unsigned worker_red_size;
static unsigned worker_red_align;
#define worker_red_name "__worker_red"
static GTY(()) rtx worker_red_sym;

/* Global lock variable, needed for 128bit worker & gang reductions.  */
static GTY(()) tree global_lock_var;

/* Allocate a new, cleared machine_function structure.  */

static struct machine_function *
nvptx_init_machine_status (void)
{
  struct machine_function *p = ggc_cleared_alloc<machine_function> ();
  p->ret_reg_mode = VOIDmode;
  return p;
}

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
nvptx_option_override (void)
{
  init_machine_status = nvptx_init_machine_status;
  /* Gives us a predictable order, which we need especially for variables.  */
  flag_toplevel_reorder = 1;
  /* Assumes that it will see only hard registers.  */
  flag_var_tracking = 0;
  write_symbols = NO_DEBUG;
  debug_info_level = DINFO_LEVEL_NONE;

  if (nvptx_optimize < 0)
    nvptx_optimize = optimize > 0;

  declared_fndecls_htab = hash_table<tree_hasher>::create_ggc (17);
  needed_fndecls_htab = hash_table<tree_hasher>::create_ggc (17);
  declared_libfuncs_htab
    = hash_table<declared_libfunc_hasher>::create_ggc (17);

  worker_bcast_sym = gen_rtx_SYMBOL_REF (Pmode, worker_bcast_name);
  SET_SYMBOL_DATA_AREA (worker_bcast_sym, DATA_AREA_SHARED);
  worker_bcast_align = GET_MODE_ALIGNMENT (SImode) / BITS_PER_UNIT;

  worker_red_sym = gen_rtx_SYMBOL_REF (Pmode, worker_red_name);
  SET_SYMBOL_DATA_AREA (worker_red_sym, DATA_AREA_SHARED);
  worker_red_align = GET_MODE_ALIGNMENT (SImode) / BITS_PER_UNIT;
}
/* Return a ptx type for MODE.  If PROMOTE, then use .u32 for QImode to
   deal with ptx idiosyncrasies.  */

const char *
nvptx_ptx_type_from_mode (machine_mode mode, bool promote)
{
  switch (mode)
    {
    case BLKmode:
      return ".b8";
    case BImode:
      return ".pred";
    case QImode:
      if (promote)
	return ".u32";
      else
	return ".u8";
    case HImode:
      return ".u16";
    case SImode:
      return ".u32";
    case DImode:
      return ".u64";

    case SFmode:
      return ".f32";
    case DFmode:
      return ".f64";

    default:
      gcc_unreachable ();
    }
}
/* Encode the PTX data area that DECL (which might not actually be a
   _DECL) should reside in.  */

static void
nvptx_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);
  if (first && MEM_P (rtl))
    {
      nvptx_data_area area = DATA_AREA_GENERIC;

      if (TREE_CONSTANT (decl))
	area = DATA_AREA_CONST;
      else if (TREE_CODE (decl) == VAR_DECL)
	/* TODO: This would be a good place to check for a .shared or
	   other section name.  */
	area = TREE_READONLY (decl) ? DATA_AREA_CONST : DATA_AREA_GLOBAL;

      SET_SYMBOL_DATA_AREA (XEXP (rtl, 0), area);
    }
}
/* Return the PTX name of the data area in which SYM should be
   placed.  The symbol must have already been processed by
   nvptx_encode_section_info, or equivalent.  */

static const char *
section_for_sym (rtx sym)
{
  nvptx_data_area area = SYMBOL_DATA_AREA (sym);
  /* Same order as nvptx_data_area enum.  */
  static char const *const areas[] =
    {"", ".global", ".shared", ".local", ".const", ".param"};

  return areas[area];
}
/* Similarly for a decl.  */

static const char *
section_for_decl (const_tree decl)
{
  return section_for_sym (XEXP (DECL_RTL (CONST_CAST (tree, decl)), 0));
}

/* Check NAME for special function names and redirect them by returning a
   replacement.  This applies to malloc, free and realloc, for which we
   want to use libgcc wrappers, and call, which triggers a bug in
   ptxas.  */

static const char *
nvptx_name_replacement (const char *name)
{
  if (strcmp (name, "call") == 0)
    return "__nvptx_call";
  if (strcmp (name, "malloc") == 0)
    return "__nvptx_malloc";
  if (strcmp (name, "free") == 0)
    return "__nvptx_free";
  if (strcmp (name, "realloc") == 0)
    return "__nvptx_realloc";
  return name;
}

/* If MODE should be treated as two registers of an inner mode, return
   that inner mode.  Otherwise return VOIDmode.  */

static machine_mode
maybe_split_mode (machine_mode mode)
{
  if (COMPLEX_MODE_P (mode))
    return GET_MODE_INNER (mode);

  if (mode == TImode)
    return DImode;

  return VOIDmode;
}
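/* For example, maybe_split_mode (TImode) is DImode (a TImode value lives
   in two DImode registers), maybe_split_mode (DCmode) is DFmode, and
   maybe_split_mode (SImode) is VOIDmode (no split needed).  */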
/* Output a register, subreg, or register pair (with optional
   enclosing braces).  */

static void
output_reg (FILE *file, unsigned regno, machine_mode inner_mode,
	    int subreg_offset = -1)
{
  if (inner_mode == VOIDmode)
    {
      if (HARD_REGISTER_NUM_P (regno))
	fprintf (file, "%s", reg_names[regno]);
      else
	fprintf (file, "%%r%d", regno);
    }
  else if (subreg_offset >= 0)
    {
      output_reg (file, regno, VOIDmode);
      fprintf (file, "$%d", subreg_offset);
    }
  else
    {
      if (subreg_offset == -1)
	fprintf (file, "{");
      output_reg (file, regno, inner_mode, GET_MODE_SIZE (inner_mode));
      fprintf (file, ",");
      output_reg (file, regno, inner_mode, 0);
      if (subreg_offset == -1)
	fprintf (file, "}");
    }
}
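/* As an illustrative example: a TImode pseudo 23, printed with the default
   SUBREG_OFFSET of -1 and a DImode inner mode, comes out as
   "{%r23$8,%r23$0}", i.e. the high and low halves as a braced register
   pair.  */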
/* Emit forking instructions for MASK.  */

static void
nvptx_emit_forking (unsigned mask, bool is_call)
{
  mask &= (GOMP_DIM_MASK (GOMP_DIM_WORKER)
	   | GOMP_DIM_MASK (GOMP_DIM_VECTOR));
  if (mask)
    {
      rtx op = GEN_INT (mask | (is_call << GOMP_DIM_MAX));

      /* Emit fork at all levels.  This helps form SESE regions, as
	 it creates a block with a single successor before entering a
	 partitioned region.  That is a good candidate for the end of
	 an SESE region.  */
      if (!is_call)
	emit_insn (gen_nvptx_fork (op));
      emit_insn (gen_nvptx_forked (op));
    }
}
/* Emit joining instructions for MASK.  */

static void
nvptx_emit_joining (unsigned mask, bool is_call)
{
  mask &= (GOMP_DIM_MASK (GOMP_DIM_WORKER)
	   | GOMP_DIM_MASK (GOMP_DIM_VECTOR));
  if (mask)
    {
      rtx op = GEN_INT (mask | (is_call << GOMP_DIM_MAX));

      /* Emit joining for all non-call pars to ensure there's a single
	 predecessor for the block the join insn ends up in.  This is
	 needed for skipping entire loops.  */
      if (!is_call)
	emit_insn (gen_nvptx_joining (op));
      emit_insn (gen_nvptx_join (op));
    }
}

#define PASS_IN_REG_P(MODE, TYPE)				\
  ((GET_MODE_CLASS (MODE) == MODE_INT				\
    || GET_MODE_CLASS (MODE) == MODE_FLOAT			\
    || ((GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT		\
	 || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT)	\
	&& !AGGREGATE_TYPE_P (TYPE)))				\
   && (MODE) != TImode)

#define RETURN_IN_REG_P(MODE)			\
  ((GET_MODE_CLASS (MODE) == MODE_INT		\
    || GET_MODE_CLASS (MODE) == MODE_FLOAT)	\
   && GET_MODE_SIZE (MODE) <= 8)
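/* Roughly: scalar integer and float values (and non-aggregate complex
   values) are passed in registers, while TImode and BLKmode values fall
   back to pass-by-reference (see nvptx_pass_by_reference below); only
   scalar values of at most 8 bytes are returned in a register.  */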
/* Perform a mode promotion for a function argument with MODE.  Return
   the promoted mode.  */

static machine_mode
arg_promotion (machine_mode mode)
{
  if (mode == QImode || mode == HImode)
    return SImode;
  return mode;
}
/* Process function parameter TYPE, either emitting it in a prototype
   argument list, or as a copy in a function prologue.  ARGNO is the
   index of this argument in the PTX function.  FOR_REG is negative if
   we're emitting the PTX prototype, zero if we're copying to an
   argument register, and greater than zero if we're copying to a
   specific hard register.  PROTOTYPED is true if this is a prototyped
   function, rather than an old-style C declaration.

   The behaviour here must match the regular GCC function parameter
   marshalling machinery.  */

static int
write_one_arg (std::stringstream &s, int for_reg, int argno,
	       tree type, bool prototyped)
{
  machine_mode mode = TYPE_MODE (type);

  if (!PASS_IN_REG_P (mode, type))
    mode = Pmode;

  machine_mode split = maybe_split_mode (mode);
  if (split != VOIDmode)
    {
      mode = split;
      argno = write_one_arg (s, for_reg, argno,
			     TREE_TYPE (type), prototyped);
    }

  if (!prototyped && !AGGREGATE_TYPE_P (type))
    {
      if (mode == SFmode)
	mode = DFmode;
      mode = arg_promotion (mode);
    }

  if (for_reg < 0)
    {
      /* Writing PTX prototype.  */
      s << (argno ? ", " : " (");
      s << ".param" << nvptx_ptx_type_from_mode (mode, false)
	<< " %in_ar" << argno;
      if (mode == QImode || mode == HImode)
	s << "[1]";
    }
  else
    {
      mode = arg_promotion (mode);
      s << "\t.reg" << nvptx_ptx_type_from_mode (mode, false) << " ";
      if (for_reg)
	s << reg_names[for_reg];
      else
	s << "%ar" << argno;
      s << ";\n";
      s << "\tld.param" << nvptx_ptx_type_from_mode (mode, false) << " ";
      if (for_reg)
	s << reg_names[for_reg];
      else
	s << "%ar" << argno;
      s << ", [%in_ar" << argno << "];\n";
    }
  return argno + 1;
}
/* Look for attributes in ATTRS that would indicate we must write a function
   as a .entry kernel rather than a .func.  Return true if one is found.  */

static bool
write_as_kernel (tree attrs)
{
  return (lookup_attribute ("kernel", attrs) != NULL_TREE
	  || lookup_attribute ("omp target entrypoint", attrs) != NULL_TREE);
}

/* Emit a linker marker for a function decl or defn.  */

static void
write_fn_marker (std::stringstream &s, bool is_defn, bool globalize,
		 const char *name)
{
  s << "\n// BEGIN";
  if (globalize)
    s << " GLOBAL";
  s << " FUNCTION " << (is_defn ? "DEF: " : "DECL: ");
  s << name << "\n";
}

/* Emit a linker marker for a variable decl or defn.  */

static void
write_var_marker (FILE *file, bool is_defn, bool globalize, const char *name)
{
  fprintf (file, "\n// BEGIN%s VAR %s: ",
	   globalize ? " GLOBAL" : "",
	   is_defn ? "DEF" : "DECL");
  assemble_name_raw (file, name);
  fputs ("\n", file);
}

/* Write a .func or .kernel declaration or definition along with
   a helper comment for use by ld.  S is the stream to write to, DECL
   the decl for the function with name NAME.  For definitions, emit
   a declaration too.  */

static const char *
write_fn_proto (std::stringstream &s, bool is_defn,
		const char *name, const_tree decl)
{
  if (is_defn)
    /* Emit a declaration.  The PTX assembler gets upset without it.  */
    name = write_fn_proto (s, false, name, decl);
  else
    {
      /* Avoid repeating the name replacement.  */
      name = nvptx_name_replacement (name);
      if (name[0] == '*')
	name++;
    }

  write_fn_marker (s, is_defn, TREE_PUBLIC (decl), name);

  /* PTX declaration.  */
  if (DECL_EXTERNAL (decl))
    s << ".extern ";
  else if (TREE_PUBLIC (decl))
    s << (DECL_WEAK (decl) ? ".weak " : ".visible ");
  s << (write_as_kernel (DECL_ATTRIBUTES (decl)) ? ".entry " : ".func ");

  tree fntype = TREE_TYPE (decl);
  tree result_type = TREE_TYPE (fntype);

  /* Declare the result.  */
  bool return_in_mem = false;
  if (TYPE_MODE (result_type) != VOIDmode)
    {
      machine_mode mode = TYPE_MODE (result_type);
      if (!RETURN_IN_REG_P (mode))
	return_in_mem = true;
      else
	{
	  mode = arg_promotion (mode);
	  s << "(.param" << nvptx_ptx_type_from_mode (mode, false)
	    << " %out_retval) ";
	}
    }

  s << name;

  int argno = 0;

  /* Emit argument list.  */
  if (return_in_mem)
    argno = write_one_arg (s, -1, argno, ptr_type_node, true);

  /* We get:
     NULL in TYPE_ARG_TYPES, for old-style functions
     NULL in DECL_ARGUMENTS, for builtin functions without another
     declaration.
     So we have to pick the best one we have.  */
  tree args = TYPE_ARG_TYPES (fntype);
  bool prototyped = true;
  if (!args)
    {
      args = DECL_ARGUMENTS (decl);
      prototyped = false;
    }

  for (; args; args = TREE_CHAIN (args))
    {
      tree type = prototyped ? TREE_VALUE (args) : TREE_TYPE (args);

      if (type != void_type_node)
	argno = write_one_arg (s, -1, argno, type, prototyped);
    }

  if (stdarg_p (fntype))
    argno = write_one_arg (s, -1, argno, ptr_type_node, true);

  if (DECL_STATIC_CHAIN (decl))
    argno = write_one_arg (s, -1, argno, ptr_type_node, true);

  if (!argno && strcmp (name, "main") == 0)
    {
      argno = write_one_arg (s, -1, argno, integer_type_node, true);
      argno = write_one_arg (s, -1, argno, ptr_type_node, true);
    }

  if (argno)
    s << ")";

  s << (is_defn ? "\n" : ";\n");

  return name;
}
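/* As a rough example of the output, an external prototyped function
   "int foo (int)" is declared along the lines of:

     // BEGIN GLOBAL FUNCTION DECL: foo
     .extern .func (.param.u32 %out_retval) foo (.param.u32 %in_ar0);

   (Illustrative only; the exact output depends on the decl's
   attributes and linkage.)  */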
/* Construct a function declaration from a call insn.  This can be
   necessary for two reasons - either we have an indirect call which
   requires a .callprototype declaration, or we have a libcall
   generated by emit_library_call for which no decl exists.  */

static void
write_fn_proto_from_insn (std::stringstream &s, const char *name,
			  rtx result, rtx pat)
{
  if (!name)
    {
      s << "\t.callprototype ";
      name = "_";
    }
  else
    {
      name = nvptx_name_replacement (name);
      write_fn_marker (s, false, true, name);
      s << "\t.extern .func ";
    }

  if (result != NULL_RTX)
    s << "(.param"
      << nvptx_ptx_type_from_mode (arg_promotion (GET_MODE (result)), false)
      << " %rval) ";

  s << name;

  const char *sep = " (";
  int arg_end = XVECLEN (pat, 0);
  for (int i = 1; i < arg_end; i++)
    {
      /* We don't have to deal with mode splitting here, as that was
	 already done when generating the call sequence.  */
      machine_mode mode = GET_MODE (XEXP (XVECEXP (pat, 0, i), 0));

      s << sep
	<< ".param"
	<< nvptx_ptx_type_from_mode (mode, false)
	<< " %arg"
	<< i;
      if (mode == QImode || mode == HImode)
	s << "[1]";
      sep = ", ";
    }
  if (arg_end != 1)
    s << ")";
  s << ";\n";
}
/* DECL is an external FUNCTION_DECL; make sure it's in the fndecl hash
   table and write a ptx prototype.  These are emitted at end of
   compilation.  */

static void
nvptx_record_fndecl (tree decl)
{
  tree *slot = declared_fndecls_htab->find_slot (decl, INSERT);
  if (*slot == NULL)
    {
      *slot = decl;
      const char *name = get_fnname_from_decl (decl);
      write_fn_proto (func_decls, false, name, decl);
    }
}
/* Record a libcall or unprototyped external function.  CALLEE is the
   SYMBOL_REF.  Insert into the libfunc hash table and emit a ptx
   declaration for it.  */

static void
nvptx_record_libfunc (rtx callee, rtx retval, rtx pat)
{
  rtx *slot = declared_libfuncs_htab->find_slot (callee, INSERT);
  if (*slot == NULL)
    {
      *slot = callee;

      const char *name = XSTR (callee, 0);
      write_fn_proto_from_insn (func_decls, name, retval, pat);
    }
}

/* DECL is an external FUNCTION_DECL, that we're referencing.  If it
   is prototyped, record it now.  Otherwise record it as needed at end
   of compilation, when we might have more information about it.  */

void
nvptx_record_needed_fndecl (tree decl)
{
  if (TYPE_ARG_TYPES (TREE_TYPE (decl)) == NULL_TREE)
    {
      tree *slot = needed_fndecls_htab->find_slot (decl, INSERT);
      if (*slot == NULL)
	*slot = decl;
    }
  else
    nvptx_record_fndecl (decl);
}

/* SYM is a SYMBOL_REF.  If it refers to an external function, record
   it as needed.  */

static void
nvptx_maybe_record_fnsym (rtx sym)
{
  tree decl = SYMBOL_REF_DECL (sym);

  if (decl && TREE_CODE (decl) == FUNCTION_DECL && DECL_EXTERNAL (decl))
    nvptx_record_needed_fndecl (decl);
}

/* Emit code to initialize the REGNO predicate register to indicate
   whether we are not lane zero on the NAME axis.  */

static void
nvptx_init_axis_predicate (FILE *file, int regno, const char *name)
{
  fprintf (file, "\t{\n");
  fprintf (file, "\t\t.reg.u32\t%%%s;\n", name);
  fprintf (file, "\t\tmov.u32\t%%%s, %%tid.%s;\n", name, name);
  fprintf (file, "\t\tsetp.ne.u32\t%%r%d, %%%s, 0;\n", regno, name);
  fprintf (file, "\t}\n");
}

/* Implement ASM_DECLARE_FUNCTION_NAME.  Writes the start of a ptx
   function, including local var decls and copies from the arguments to
   local regs.  */

void
nvptx_declare_function_name (FILE *file, const char *name, const_tree decl)
{
  tree fntype = TREE_TYPE (decl);
  tree result_type = TREE_TYPE (fntype);
  int argno = 0;

  /* We construct the initial part of the function into a string
     stream, in order to share the prototype writing code.  */
  std::stringstream s;
  write_fn_proto (s, true, name, decl);
  s << "{\n";

  bool return_in_mem = (TYPE_MODE (result_type) != VOIDmode
			&& !RETURN_IN_REG_P (TYPE_MODE (result_type)));
  if (return_in_mem)
    argno = write_one_arg (s, 0, argno, ptr_type_node, true);

  /* Declare and initialize incoming arguments.  */
  tree args = TYPE_ARG_TYPES (fntype);
  bool prototyped = true;
  if (!args)
    {
      args = DECL_ARGUMENTS (decl);
      prototyped = false;
    }

  for (; args != NULL_TREE; args = TREE_CHAIN (args))
    {
      tree type = prototyped ? TREE_VALUE (args) : TREE_TYPE (args);

      if (type != void_type_node)
	argno = write_one_arg (s, 0, argno, type, prototyped);
    }

  if (stdarg_p (fntype))
    argno = write_one_arg (s, ARG_POINTER_REGNUM, argno, ptr_type_node, true);

  if (DECL_STATIC_CHAIN (decl))
    argno = write_one_arg (s, STATIC_CHAIN_REGNUM, argno, ptr_type_node, true);

  fprintf (file, "%s", s.str().c_str());

  /* C++11 ABI causes us to return a reference to the passed in
     pointer for return_in_mem.  */
  if (cfun->machine->ret_reg_mode != VOIDmode)
    {
      machine_mode mode = arg_promotion
	((machine_mode)cfun->machine->ret_reg_mode);
      fprintf (file, "\t.reg%s %%retval;\n",
	       nvptx_ptx_type_from_mode (mode, false));
    }

  fprintf (file, "\t.reg.u%d %s;\n", GET_MODE_BITSIZE (Pmode),
	   reg_names[OUTGOING_STATIC_CHAIN_REGNUM]);

  /* Declare the pseudos we have as ptx registers.  */
  int maxregs = max_reg_num ();
  for (int i = LAST_VIRTUAL_REGISTER + 1; i < maxregs; i++)
    {
      if (regno_reg_rtx[i] != const0_rtx)
	{
	  machine_mode mode = PSEUDO_REGNO_MODE (i);
	  machine_mode split = maybe_split_mode (mode);

	  if (split != VOIDmode)
	    mode = split;
	  fprintf (file, "\t.reg%s ", nvptx_ptx_type_from_mode (mode, true));
	  output_reg (file, i, split, -2);
	  fprintf (file, ";\n");
	}
    }

  /* The only reason we might be using outgoing args is if we call a stdargs
     function.  Allocate the space for this.  If we called varargs functions
     without passing any variadic arguments, we'll see a reference to outargs
     even with a zero outgoing_args_size.  */
  HOST_WIDE_INT sz = crtl->outgoing_args_size;
  if (sz == 0)
    sz = 1;
  if (cfun->machine->has_call_with_varargs)
    {
      fprintf (file, "\t.reg.u%d %%outargs;\n"
	       "\t.local.align 8 .b8 %%outargs_ar["
	       HOST_WIDE_INT_PRINT_DEC"];\n",
	       BITS_PER_WORD, sz);
      fprintf (file, "\tcvta.local.u%d %%outargs, %%outargs_ar;\n",
	       BITS_PER_WORD);
    }

  /* Declare a local variable for the frame.  */
  sz = get_frame_size ();
  if (sz > 0 || cfun->machine->has_call_with_sc)
    {
      int alignment = crtl->stack_alignment_needed / BITS_PER_UNIT;

      fprintf (file, "\t.reg.u%d %%frame;\n"
	       "\t.local.align %d .b8 %%farray[" HOST_WIDE_INT_PRINT_DEC"];\n",
	       BITS_PER_WORD, alignment, sz == 0 ? 1 : sz);
      fprintf (file, "\tcvta.local.u%d %%frame, %%farray;\n",
	       BITS_PER_WORD);
    }

  /* Emit axis predicates.  */
  if (cfun->machine->axis_predicate[0])
    nvptx_init_axis_predicate (file,
			       REGNO (cfun->machine->axis_predicate[0]), "y");
  if (cfun->machine->axis_predicate[1])
    nvptx_init_axis_predicate (file,
			       REGNO (cfun->machine->axis_predicate[1]), "x");
}
/* Output a return instruction.  Also copy the return value to its outgoing
   location.  */

const char *
nvptx_output_return (void)
{
  machine_mode mode = (machine_mode)cfun->machine->ret_reg_mode;

  if (mode != VOIDmode)
    {
      mode = arg_promotion (mode);
      fprintf (asm_out_file, "\tst.param%s\t[%%out_retval], %%retval;\n",
	       nvptx_ptx_type_from_mode (mode, false));
    }

  return "ret;";
}
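/* For an SImode return value, for instance, the epilogue emitted above
   looks roughly like:

	st.param.u32	[%out_retval], %retval;
	ret;

   (illustrative only).  */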
/* Terminate a function by writing a closing brace to FILE.  */

void
nvptx_function_end (FILE *file)
{
  fprintf (file, "}\n");
}

/* Decide whether we can make a sibling call to a function.  For ptx, we
   can't.  */

static bool
nvptx_function_ok_for_sibcall (tree, tree)
{
  return false;
}

/* Return Dynamic ReAlignment Pointer RTX.  For PTX there isn't any.  */

static rtx
nvptx_get_drap_rtx (void)
{
  return NULL_RTX;
}

/* Implement the TARGET_CALL_ARGS hook.  Record information about one
   argument to the next call.  */

static void
nvptx_call_args (rtx arg, tree funtype)
{
  if (cfun->machine->start_call == NULL_RTX)
    {
      cfun->machine->call_args = NULL;
      cfun->machine->funtype = funtype;
      cfun->machine->start_call = const0_rtx;
    }
  if (arg == pc_rtx)
    return;

  rtx_expr_list *args_so_far = cfun->machine->call_args;
  if (REG_P (arg))
    cfun->machine->call_args = alloc_EXPR_LIST (VOIDmode, arg, args_so_far);
}

/* Implement the corresponding END_CALL_ARGS hook.  Clear and free the
   information we recorded.  */

static void
nvptx_end_call_args (void)
{
  cfun->machine->start_call = NULL_RTX;
  free_EXPR_LIST_list (&cfun->machine->call_args);
}
/* Emit the sequence for a call to ADDRESS, setting RETVAL.  Keep
   track of whether calls involving static chains or varargs were seen
   in the current function.
   For libcalls, maintain a hash table of decls we have seen, and
   record a function decl for later when encountering a new one.  */

void
nvptx_expand_call (rtx retval, rtx address)
{
  int nargs = 0;
  rtx callee = XEXP (address, 0);
  rtx pat, t;
  rtvec vec;
  rtx varargs = NULL_RTX;
  unsigned parallel = 0;

  for (t = cfun->machine->call_args; t; t = XEXP (t, 1))
    nargs++;

  if (!call_insn_operand (callee, Pmode))
    {
      callee = force_reg (Pmode, callee);
      address = change_address (address, QImode, callee);
    }

  if (GET_CODE (callee) == SYMBOL_REF)
    {
      tree decl = SYMBOL_REF_DECL (callee);
      if (decl != NULL_TREE)
	{
	  if (DECL_STATIC_CHAIN (decl))
	    cfun->machine->has_call_with_sc = true;

	  tree attr = get_oacc_fn_attrib (decl);
	  if (attr)
	    {
	      tree dims = TREE_VALUE (attr);

	      parallel = GOMP_DIM_MASK (GOMP_DIM_MAX) - 1;
	      for (int ix = 0; ix != GOMP_DIM_MAX; ix++)
		{
		  if (TREE_PURPOSE (dims)
		      && !integer_zerop (TREE_PURPOSE (dims)))
		    break;
		  /* Not on this axis.  */
		  parallel ^= GOMP_DIM_MASK (ix);
		  dims = TREE_CHAIN (dims);
		}
	    }
	}
    }

  if (cfun->machine->funtype
      /* It's possible to construct testcases where we call a variable.
	 See compile/20020129-1.c.  stdarg_p will crash so avoid calling it
	 in such a case.  */
      && (TREE_CODE (cfun->machine->funtype) == FUNCTION_TYPE
	  || TREE_CODE (cfun->machine->funtype) == METHOD_TYPE)
      && stdarg_p (cfun->machine->funtype))
    {
      varargs = gen_reg_rtx (Pmode);
      emit_move_insn (varargs, stack_pointer_rtx);
      cfun->machine->has_call_with_varargs = true;
    }
  vec = rtvec_alloc (nargs + 1 + (varargs ? 1 : 0));
  pat = gen_rtx_PARALLEL (VOIDmode, vec);

  int vec_pos = 0;

  rtx tmp_retval = retval;
  t = gen_rtx_CALL (VOIDmode, address, const0_rtx);
  if (retval != NULL_RTX)
    {
      if (!nvptx_register_operand (retval, GET_MODE (retval)))
	tmp_retval = gen_reg_rtx (GET_MODE (retval));
      t = gen_rtx_SET (tmp_retval, t);
    }
  XVECEXP (pat, 0, vec_pos++) = t;

  /* Construct the call insn, including a USE for each argument pseudo
     register.  These will be used when printing the insn.  */
  for (rtx arg = cfun->machine->call_args; arg; arg = XEXP (arg, 1))
    {
      rtx this_arg = XEXP (arg, 0);
      XVECEXP (pat, 0, vec_pos++) = gen_rtx_USE (VOIDmode, this_arg);
    }

  if (varargs)
    XVECEXP (pat, 0, vec_pos++) = gen_rtx_USE (VOIDmode, varargs);

  gcc_assert (vec_pos == XVECLEN (pat, 0));

  nvptx_emit_forking (parallel, true);
  emit_call_insn (pat);
  nvptx_emit_joining (parallel, true);

  if (tmp_retval != retval)
    emit_move_insn (retval, tmp_retval);
}
/* Implement TARGET_FUNCTION_ARG.  */

static rtx
nvptx_function_arg (cumulative_args_t, machine_mode mode,
		    const_tree, bool named)
{
  if (mode == VOIDmode)
    return NULL_RTX;

  if (named)
    return gen_reg_rtx (mode);
  return NULL_RTX;
}

/* Implement TARGET_FUNCTION_INCOMING_ARG.  */

static rtx
nvptx_function_incoming_arg (cumulative_args_t cum_v, machine_mode mode,
			     const_tree, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  if (mode == VOIDmode)
    return NULL_RTX;

  if (!named)
    return NULL_RTX;

  /* No need to deal with split modes here, the only case that can
     happen is complex modes and those are dealt with by
     TARGET_SPLIT_COMPLEX_ARG.  */
  return gen_rtx_UNSPEC (mode,
			 gen_rtvec (1, GEN_INT (cum->count)),
			 UNSPEC_ARG_REG);
}

/* Implement TARGET_FUNCTION_ARG_ADVANCE.  */

static void
nvptx_function_arg_advance (cumulative_args_t cum_v,
			    machine_mode ARG_UNUSED (mode),
			    const_tree ARG_UNUSED (type),
			    bool ARG_UNUSED (named))
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  cum->count++;
}

/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook.

   For nvptx, we know how to handle functions declared as stdarg: by
   passing an extra pointer to the unnamed arguments.  However, the
   Fortran frontend can produce a different situation, where a
   function pointer is declared with no arguments, but the actual
   function and calls to it take more arguments.  In that case, we
   want to ensure the call matches the definition of the function.  */

static bool
nvptx_strict_argument_naming (cumulative_args_t cum_v)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  return cum->fntype == NULL_TREE || stdarg_p (cum->fntype);
}

/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  */

static unsigned int
nvptx_function_arg_boundary (machine_mode mode, const_tree type)
{
  unsigned int boundary = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);

  if (boundary > BITS_PER_WORD)
    return 2 * BITS_PER_WORD;

  if (mode == BLKmode)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size > 4)
	return 2 * BITS_PER_WORD;
      if (boundary < BITS_PER_WORD)
	{
	  if (size >= 3)
	    return BITS_PER_WORD;
	  if (size >= 2)
	    return 2 * BITS_PER_UNIT;
	}
    }
  return boundary;
}

/* TARGET_FUNCTION_VALUE implementation.  Returns an RTX representing the place
   where function FUNC returns or receives a value of data type TYPE.  */

static rtx
nvptx_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED,
		      bool outgoing)
{
  int unsignedp = TYPE_UNSIGNED (type);
  machine_mode orig_mode = TYPE_MODE (type);
  machine_mode mode = promote_function_mode (type, orig_mode,
					     &unsignedp, NULL_TREE, 1);
  if (outgoing)
    return gen_rtx_REG (mode, NVPTX_RETURN_REGNUM);
  if (cfun->machine->start_call == NULL_RTX)
    /* Pretend to return in a hard reg for early uses before pseudos can be
       generated.  */
    return gen_rtx_REG (mode, NVPTX_RETURN_REGNUM);
  return gen_reg_rtx (mode);
}

/* Implement TARGET_LIBCALL_VALUE.  */

static rtx
nvptx_libcall_value (machine_mode mode, const_rtx)
{
  if (cfun->machine->start_call == NULL_RTX)
    /* Pretend to return in a hard reg for early uses before pseudos can be
       generated.  */
    return gen_rtx_REG (mode, NVPTX_RETURN_REGNUM);
  return gen_reg_rtx (mode);
}

/* Implement TARGET_FUNCTION_VALUE_REGNO_P.  */

static bool
nvptx_function_value_regno_p (const unsigned int regno)
{
  return regno == NVPTX_RETURN_REGNUM;
}

/* Types with a mode other than those supported by the machine are passed by
   reference in memory.  */

static bool
nvptx_pass_by_reference (cumulative_args_t, machine_mode mode,
			 const_tree type, bool)
{
  return !PASS_IN_REG_P (mode, type);
}

/* Implement TARGET_RETURN_IN_MEMORY.  */

static bool
nvptx_return_in_memory (const_tree type, const_tree)
{
  machine_mode mode = TYPE_MODE (type);
  if (!RETURN_IN_REG_P (mode))
    return true;
  return false;
}

/* Implement TARGET_PROMOTE_FUNCTION_MODE.  */

static machine_mode
nvptx_promote_function_mode (const_tree type, machine_mode mode,
			     int *punsignedp,
			     const_tree funtype, int for_return)
{
  if (type == NULL_TREE)
    return mode;
  if (for_return)
    return promote_mode (type, mode, punsignedp);
  /* For K&R-style functions, try to match the language promotion rules to
     minimize type mismatches at assembly time.  */
  if (TYPE_ARG_TYPES (funtype) == NULL_TREE
      && type != NULL_TREE
      && !AGGREGATE_TYPE_P (type))
    {
      if (mode == SFmode)
	mode = DFmode;
      mode = arg_promotion (mode);
    }

  return mode;
}

/* Implement TARGET_STATIC_CHAIN.  */

static rtx
nvptx_static_chain (const_tree fndecl, bool incoming_p)
{
  if (!DECL_STATIC_CHAIN (fndecl))
    return NULL;

  if (incoming_p)
    return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
  else
    return gen_rtx_REG (Pmode, OUTGOING_STATIC_CHAIN_REGNUM);
}
/* Emit a comparison COMPARE, and return the new test to be used in the
   jump.  */

rtx
nvptx_expand_compare (rtx compare)
{
  rtx pred = gen_reg_rtx (BImode);
  rtx cmp = gen_rtx_fmt_ee (GET_CODE (compare), BImode,
			    XEXP (compare, 0), XEXP (compare, 1));
  emit_insn (gen_rtx_SET (pred, cmp));
  return gen_rtx_NE (BImode, pred, const0_rtx);
}
/* Expand the oacc fork & join primitive into ptx-required unspecs.  */

void
nvptx_expand_oacc_fork (unsigned mode)
{
  nvptx_emit_forking (GOMP_DIM_MASK (mode), false);
}

void
nvptx_expand_oacc_join (unsigned mode)
{
  nvptx_emit_joining (GOMP_DIM_MASK (mode), false);
}

/* Generate instruction(s) to unpack a 64 bit object into 2 32 bit
   objects.  */

static rtx
nvptx_gen_unpack (rtx dst0, rtx dst1, rtx src)
{
  rtx res;

  switch (GET_MODE (src))
    {
    case DImode:
      res = gen_unpackdisi2 (dst0, dst1, src);
      break;
    case DFmode:
      res = gen_unpackdfsi2 (dst0, dst1, src);
      break;
    default: gcc_unreachable ();
    }
  return res;
}

/* Generate instruction(s) to pack 2 32 bit objects into a 64 bit
   object.  */

static rtx
nvptx_gen_pack (rtx dst, rtx src0, rtx src1)
{
  rtx res;

  switch (GET_MODE (dst))
    {
    case DImode:
      res = gen_packsidi2 (dst, src0, src1);
      break;
    case DFmode:
      res = gen_packsidf2 (dst, src0, src1);
      break;
    default: gcc_unreachable ();
    }
  return res;
}
/* Generate an instruction or sequence to shuffle register SRC into
   DST across the lanes of a single warp, according to shuffle kind
   KIND and lane selector IDX.  */

static rtx
nvptx_gen_shuffle (rtx dst, rtx src, rtx idx, nvptx_shuffle_kind kind)
{
  rtx res;

  switch (GET_MODE (dst))
    {
    case SImode:
      res = gen_nvptx_shufflesi (dst, src, idx, GEN_INT (kind));
      break;
    case SFmode:
      res = gen_nvptx_shufflesf (dst, src, idx, GEN_INT (kind));
      break;
    case DImode:
    case DFmode:
      {
	rtx tmp0 = gen_reg_rtx (SImode);
	rtx tmp1 = gen_reg_rtx (SImode);

	start_sequence ();
	emit_insn (nvptx_gen_unpack (tmp0, tmp1, src));
	emit_insn (nvptx_gen_shuffle (tmp0, tmp0, idx, kind));
	emit_insn (nvptx_gen_shuffle (tmp1, tmp1, idx, kind));
	emit_insn (nvptx_gen_pack (dst, tmp0, tmp1));
	res = get_insns ();
	end_sequence ();
      }
      break;
    case BImode:
      {
	rtx tmp = gen_reg_rtx (SImode);

	start_sequence ();
	emit_insn (gen_sel_truesi (tmp, src, GEN_INT (1), const0_rtx));
	emit_insn (nvptx_gen_shuffle (tmp, tmp, idx, kind));
	emit_insn (gen_rtx_SET (dst, gen_rtx_NE (BImode, tmp, const0_rtx)));
	res = get_insns ();
	end_sequence ();
      }
      break;

    default:
      gcc_unreachable ();
    }
  return res;
}
/* Generate an instruction or sequence to broadcast register REG
   across the vectors of a single warp.  */

static rtx
nvptx_gen_vcast (rtx reg)
{
  return nvptx_gen_shuffle (reg, reg, const0_rtx, SHUFFLE_IDX);
}

/* Structure used when generating a worker-level spill or fill.  */

struct wcast_data_t
{
  rtx base;  /* Register holding base addr of buffer.  */
  rtx ptr;  /* Iteration var, if needed.  */
  unsigned offset; /* Offset into worker buffer.  */
};

/* Direction of the spill/fill and looping setup/teardown indicator.  */

enum propagate_mask
  {
    PM_read = 1 << 0,
    PM_write = 1 << 1,
    PM_loop_begin = 1 << 2,
    PM_loop_end = 1 << 3,

    PM_read_write = PM_read | PM_write
  };

/* Generate instruction(s) to spill or fill register REG to/from the
   worker broadcast array.  PM indicates what is to be done, REP
   how many loop iterations will be executed (0 for not a loop).  */

static rtx
nvptx_gen_wcast (rtx reg, propagate_mask pm, unsigned rep, wcast_data_t *data)
{
  rtx res;
  machine_mode mode = GET_MODE (reg);

  switch (mode)
    {
    case BImode:
      {
	rtx tmp = gen_reg_rtx (SImode);

	start_sequence ();
	if (pm & PM_read)
	  emit_insn (gen_sel_truesi (tmp, reg, GEN_INT (1), const0_rtx));
	emit_insn (nvptx_gen_wcast (tmp, pm, rep, data));
	if (pm & PM_write)
	  emit_insn (gen_rtx_SET (reg, gen_rtx_NE (BImode, tmp, const0_rtx)));
	res = get_insns ();
	end_sequence ();
      }
      break;

    default:
      {
	rtx addr = data->ptr;

	if (!addr)
	  {
	    unsigned align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;

	    if (align > worker_bcast_align)
	      worker_bcast_align = align;
	    data->offset = (data->offset + align - 1) & ~(align - 1);
	    addr = data->base;
	    if (data->offset)
	      addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (data->offset));
	  }

	addr = gen_rtx_MEM (mode, addr);
	addr = gen_rtx_UNSPEC (mode, gen_rtvec (1, addr), UNSPEC_SHARED_DATA);
	if (pm == PM_read)
	  res = gen_rtx_SET (addr, reg);
	else if (pm == PM_write)
	  res = gen_rtx_SET (reg, addr);
	else
	  gcc_unreachable ();

	if (data->ptr)
	  {
	    /* We're using a ptr, increment it.  */
	    start_sequence ();

	    emit_insn (res);
	    emit_insn (gen_adddi3 (data->ptr, data->ptr,
				   GEN_INT (GET_MODE_SIZE (GET_MODE (reg)))));
	    res = get_insns ();
	    end_sequence ();
	  }
	else
	  rep = 1;
	data->offset += rep * GET_MODE_SIZE (GET_MODE (reg));
      }
      break;
    }
  return res;
}
/* When loading an operand OP, verify whether an address space
   conversion to generic is required, and if so, perform it.  Check
   for SYMBOL_REFs and record them if needed.  Return either the
   original operand, or the converted one.  */

rtx
nvptx_maybe_convert_symbolic_operand (rtx op)
{
  if (GET_MODE (op) != Pmode)
    return op;

  rtx sym = op;
  if (GET_CODE (sym) == CONST)
    sym = XEXP (sym, 0);
  if (GET_CODE (sym) == PLUS)
    sym = XEXP (sym, 0);

  if (GET_CODE (sym) != SYMBOL_REF)
    return op;

  nvptx_maybe_record_fnsym (sym);

  nvptx_data_area area = SYMBOL_DATA_AREA (sym);
  if (area == DATA_AREA_GENERIC)
    return op;

  rtx dest = gen_reg_rtx (Pmode);
  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op),
					  UNSPEC_TO_GENERIC)));
  return dest;
}
/* Returns true if X is a valid address for use in a memory reference.  */

static bool
nvptx_legitimate_address_p (machine_mode, rtx x, bool)
{
  enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
      return true;

    case PLUS:
      if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	return true;
      return false;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    default:
      return false;
    }
}

/* Implement HARD_REGNO_MODE_OK.  We barely use hard regs, but we want
   to ensure that the return register's mode isn't changed.  */

bool
nvptx_hard_regno_mode_ok (int regno, machine_mode mode)
{
  if (regno != NVPTX_RETURN_REGNUM
      || cfun == NULL || cfun->machine->ret_reg_mode == VOIDmode)
    return true;
  return mode == cfun->machine->ret_reg_mode;
}

/* Machinery to output constant initializers.  When beginning an initializer,
   we decide on a chunk size (which is visible in ptx in the type used), and
   then all initializer data is buffered until a chunk is filled and ready to
   be written out.  */

/* Used when assembling integers to ensure data is emitted in
   pieces whose size matches the declaration we printed.  */
static unsigned int decl_chunk_size;
static machine_mode decl_chunk_mode;
/* Used in the same situation, to keep track of the byte offset
   into the initializer.  */
static unsigned HOST_WIDE_INT decl_offset;
/* The initializer part we are currently processing.  */
static HOST_WIDE_INT init_part;
/* The total size of the object.  */
static unsigned HOST_WIDE_INT object_size;
/* True if we found a skip extending to the end of the object.  Used to
   assert that no data follows.  */
static bool object_finished;

/* Write the necessary separator string to begin a new initializer value.  */

static void
begin_decl_field (void)
{
  /* We never see decl_offset at zero by the time we get here.  */
  if (decl_offset == decl_chunk_size)
    fprintf (asm_out_file, " = { ");
  else
    fprintf (asm_out_file, ", ");
}

/* Output the currently stored chunk as an initializer value.  */

static void
output_decl_chunk (void)
{
  begin_decl_field ();
  output_address (VOIDmode, gen_int_mode (init_part, decl_chunk_mode));
  init_part = 0;
}
/* Add value VAL sized SIZE to the data we're emitting, and keep writing
   out chunks as they fill up.  */

static void
nvptx_assemble_value (HOST_WIDE_INT val, unsigned int size)
{
  unsigned HOST_WIDE_INT chunk_offset = decl_offset % decl_chunk_size;

  gcc_assert (!object_finished);
  while (size > 0)
    {
      int this_part = size;
      if (chunk_offset + this_part > decl_chunk_size)
	this_part = decl_chunk_size - chunk_offset;

      HOST_WIDE_INT val_part;
      HOST_WIDE_INT mask = 2;
      mask <<= this_part * BITS_PER_UNIT - 1;
      val_part = val & (mask - 1);
      init_part |= val_part << (BITS_PER_UNIT * chunk_offset);
      val >>= BITS_PER_UNIT * this_part;
      size -= this_part;
      decl_offset += this_part;
      if (decl_offset % decl_chunk_size == 0)
	output_decl_chunk ();

      chunk_offset = 0;
    }
}
/* Target hook for assembling integer object X of size SIZE.  */

static bool
nvptx_assemble_integer (rtx x, unsigned int size, int ARG_UNUSED (aligned_p))
{
  HOST_WIDE_INT val = 0;

  switch (GET_CODE (x))
    {
    default:
      gcc_unreachable ();

    case CONST_INT:
      val = INTVAL (x);
      nvptx_assemble_value (val, size);
      break;

    case CONST:
      x = XEXP (x, 0);
      gcc_assert (GET_CODE (x) == PLUS);
      val = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      gcc_assert (GET_CODE (x) == SYMBOL_REF);
      /* FALLTHROUGH */

    case SYMBOL_REF:
      gcc_assert (size == decl_chunk_size);
      if (decl_offset % decl_chunk_size != 0)
	sorry ("cannot emit unaligned pointers in ptx assembly");
      decl_offset += size;
      begin_decl_field ();

      nvptx_maybe_record_fnsym (x);
      fprintf (asm_out_file, "generic(");
      output_address (VOIDmode, x);
      fprintf (asm_out_file, ")");

      if (val)
	fprintf (asm_out_file, " + " HOST_WIDE_INT_PRINT_DEC, val);
      break;
    }

  return true;
}
/* Output SIZE zero bytes.  We ignore the FILE argument since the
   functions we're calling to perform the output just use
   asm_out_file.  */

void
nvptx_output_skip (FILE *, unsigned HOST_WIDE_INT size)
{
  if (decl_offset + size >= object_size)
    {
      if (decl_offset % decl_chunk_size != 0)
	nvptx_assemble_value (0, decl_chunk_size);
      object_finished = true;
      return;
    }

  while (size > decl_chunk_size)
    {
      nvptx_assemble_value (0, decl_chunk_size);
      size -= decl_chunk_size;
    }
  while (size-- > 0)
    nvptx_assemble_value (0, 1);
}

/* Output a string STR with length SIZE.  As in nvptx_output_skip we
   ignore the FILE arg.  */

void
nvptx_output_ascii (FILE *, const char *str, unsigned HOST_WIDE_INT size)
{
  for (unsigned HOST_WIDE_INT i = 0; i < size; i++)
    nvptx_assemble_value (str[i], 1);
}
/* Emit a PTX variable decl and prepare for emission of its
   initializer.  NAME is the symbol name and SECTION the PTX data
   area.  The type is TYPE, object size SIZE and alignment is ALIGN.
   The caller has already emitted any indentation and linkage
   specifier.  It is responsible for any initializer, terminating ;
   and newline.  SIZE is in bytes, ALIGN is in bits -- confusingly,
   this is the opposite way round from how PTX wants them!  */

static void
nvptx_assemble_decl_begin (FILE *file, const char *name, const char *section,
			   const_tree type, HOST_WIDE_INT size, unsigned align)
{
  while (TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  if (TREE_CODE (type) == VECTOR_TYPE
      || TREE_CODE (type) == COMPLEX_TYPE)
    /* Neither vector nor complex types can contain the other.  */
    type = TREE_TYPE (type);

  unsigned elt_size = int_size_in_bytes (type);

  /* Largest mode we're prepared to accept.  For BLKmode types we
     don't know if it'll contain pointer constants, so have to choose
     pointer size, otherwise we can choose DImode.  */
  machine_mode elt_mode = TYPE_MODE (type) == BLKmode ? Pmode : DImode;

  elt_size |= GET_MODE_SIZE (elt_mode);
  elt_size &= -elt_size; /* Extract LSB set.  */
  elt_mode = mode_for_size (elt_size * BITS_PER_UNIT, MODE_INT, 0);

  decl_chunk_size = elt_size;
  decl_chunk_mode = elt_mode;
  decl_offset = 0;
  init_part = 0;

  object_size = size;
  object_finished = !size;

  fprintf (file, "%s .align %d .u%d ",
	   section, align / BITS_PER_UNIT,
	   elt_size * BITS_PER_UNIT);
  assemble_name (file, name);

  if (size)
    /* We make everything an array, to simplify any initialization
       emission.  */
    fprintf (file, "[" HOST_WIDE_INT_PRINT_DEC "]",
	     (size + elt_size - 1) / elt_size);
}
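/* For example, a file-scope "int arr[3]" in the global area, aligned to
   4 bytes, begins roughly as ".global .align 4 .u32 arr[3]" -- the chunk
   size is the largest power of two dividing both the element size and
   GET_MODE_SIZE of the largest acceptable mode.  (Illustrative only; the
   initializer and terminating ";" follow separately.)  */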
/* Called when the initializer for a decl has been completely output through
   combinations of the three functions above.  */

static void
nvptx_assemble_decl_end (void)
{
  if (decl_offset != 0)
    {
      if (!object_finished && decl_offset % decl_chunk_size != 0)
	nvptx_assemble_value (0, decl_chunk_size);

      fprintf (asm_out_file, " }");
    }
  fprintf (asm_out_file, ";\n");
}

/* Output an uninitialized common or file-scope variable.  */

void
nvptx_output_aligned_decl (FILE *file, const char *name,
			   const_tree decl, HOST_WIDE_INT size, unsigned align)
{
  write_var_marker (file, true, TREE_PUBLIC (decl), name);

  /* If this is public, it is common.  The nearest thing we have to
     common is weak.  */
  fprintf (file, "\t%s", TREE_PUBLIC (decl) ? ".weak " : "");

  nvptx_assemble_decl_begin (file, name, section_for_decl (decl),
			     TREE_TYPE (decl), size, align);
  fprintf (file, ";\n");
}

/* Implement TARGET_ASM_DECLARE_CONSTANT_NAME.  Begin the process of
   writing a constant variable EXP with NAME and SIZE and its
   initializer to FILE.  */

static void
nvptx_asm_declare_constant_name (FILE *file, const char *name,
				 const_tree exp, HOST_WIDE_INT obj_size)
{
  write_var_marker (file, true, false, name);

  fprintf (file, "\t");

  tree type = TREE_TYPE (exp);
  nvptx_assemble_decl_begin (file, name, ".const", type, obj_size,
			     TYPE_ALIGN (type));
}

/* Implement the ASM_DECLARE_OBJECT_NAME macro.  Used to start writing
   a variable DECL with NAME to FILE.  */

void
nvptx_declare_object_name (FILE *file, const char *name, const_tree decl)
{
  write_var_marker (file, true, TREE_PUBLIC (decl), name);

  fprintf (file, "\t%s", (!TREE_PUBLIC (decl) ? ""
			  : DECL_WEAK (decl) ? ".weak " : ".visible "));

  tree type = TREE_TYPE (decl);
  HOST_WIDE_INT obj_size = tree_to_shwi (DECL_SIZE_UNIT (decl));
  nvptx_assemble_decl_begin (file, name, section_for_decl (decl),
			     type, obj_size, DECL_ALIGN (decl));
}

/* Implement TARGET_ASM_GLOBALIZE_LABEL by doing nothing.  */

static void
nvptx_globalize_label (FILE *, const char *)
{
}

/* Implement TARGET_ASM_ASSEMBLE_UNDEFINED_DECL.  Write an extern
   declaration only for variable DECL with NAME to FILE.  */

static void
nvptx_assemble_undefined_decl (FILE *file, const char *name, const_tree decl)
{
  write_var_marker (file, false, TREE_PUBLIC (decl), name);

  fprintf (file, "\t.extern ");
  tree size = DECL_SIZE_UNIT (decl);
  nvptx_assemble_decl_begin (file, name, section_for_decl (decl),
			     TREE_TYPE (decl), size ? tree_to_shwi (size) : 0,
			     DECL_ALIGN (decl));
  fprintf (file, ";\n");
}

/* Output a pattern for a move instruction.  */

const char *
nvptx_output_mov_insn (rtx dst, rtx src)
{
  machine_mode dst_mode = GET_MODE (dst);
  machine_mode dst_inner = (GET_CODE (dst) == SUBREG
			    ? GET_MODE (XEXP (dst, 0)) : dst_mode);
  machine_mode src_inner = (GET_CODE (src) == SUBREG
			    ? GET_MODE (XEXP (src, 0)) : dst_mode);

  if (REG_P (dst) && REGNO (dst) == NVPTX_RETURN_REGNUM && dst_mode == HImode)
    /* Special handling for the return register.  It's never really an
       HI object, and only occurs as the destination of a move
       insn.  */
    dst_inner = SImode;

  if (src_inner == dst_inner)
    return "%.\tmov%t0\t%0, %1;";

  if (CONSTANT_P (src))
    return (GET_MODE_CLASS (dst_inner) == MODE_INT
	    && GET_MODE_CLASS (src_inner) != MODE_FLOAT
	    ? "%.\tmov%t0\t%0, %1;" : "%.\tmov.b%T0\t%0, %1;");

  if (GET_MODE_SIZE (dst_inner) == GET_MODE_SIZE (src_inner))
    return "%.\tmov.b%T0\t%0, %1;";

  return "%.\tcvt%t0%t1\t%0, %1;";
}
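/* For example, after operand substitution a same-mode SImode register
   move comes out as "mov.u32 %r24, %r25;", a same-size bitcast (e.g.
   between SImode and SFmode) as "mov.b32 ...", and a genuine conversion
   as "cvt.u64.u32 ..." or similar.  (Illustrative expansions of the
   templates above.)  */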
/* Output INSN, which is a call to CALLEE with result RESULT.  For ptx, this
   involves writing .param declarations and in/out copies into them.  For
   indirect calls, also write the .callprototype.  */

const char *
nvptx_output_call_insn (rtx_insn *insn, rtx result, rtx callee)
{
  char buf[16];
  static int labelno;
  bool needs_tgt = register_operand (callee, Pmode);
  rtx pat = PATTERN (insn);
  int arg_end = XVECLEN (pat, 0);
  tree decl = NULL_TREE;

  fprintf (asm_out_file, "\t{\n");
  if (result != NULL)
    fprintf (asm_out_file, "\t\t.param%s %%retval_in;\n",
	     nvptx_ptx_type_from_mode (arg_promotion (GET_MODE (result)),
				       false));

  /* Ensure we have a ptx declaration in the output if necessary.  */
  if (GET_CODE (callee) == SYMBOL_REF)
    {
      decl = SYMBOL_REF_DECL (callee);
      if (!decl
	  || (DECL_EXTERNAL (decl) && !TYPE_ARG_TYPES (TREE_TYPE (decl))))
	nvptx_record_libfunc (callee, result, pat);
      else if (DECL_EXTERNAL (decl))
	nvptx_record_fndecl (decl);
    }

  if (needs_tgt)
    {
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCT", labelno);
      labelno++;
      ASM_OUTPUT_LABEL (asm_out_file, buf);
      std::stringstream s;
      write_fn_proto_from_insn (s, NULL, result, pat);
      fputs (s.str().c_str(), asm_out_file);
    }

  for (int argno = 1; argno < arg_end; argno++)
    {
      rtx t = XEXP (XVECEXP (pat, 0, argno), 0);
      machine_mode mode = GET_MODE (t);

      /* Mode splitting has already been done.  */
      fprintf (asm_out_file, "\t\t.param%s %%out_arg%d%s;\n",
	       nvptx_ptx_type_from_mode (mode, false), argno,
	       mode == QImode || mode == HImode ? "[1]" : "");
      fprintf (asm_out_file, "\t\tst.param%s [%%out_arg%d], %%r%d;\n",
	       nvptx_ptx_type_from_mode (mode, false), argno,
	       REGNO (t));
    }

  fprintf (asm_out_file, "\t\tcall ");
  if (result != NULL_RTX)
    fprintf (asm_out_file, "(%%retval_in), ");

  if (decl)
    {
      const char *name = get_fnname_from_decl (decl);
      name = nvptx_name_replacement (name);
      assemble_name (asm_out_file, name);
    }
  else
    output_address (VOIDmode, callee);

  const char *open = "(";
  for (int argno = 1; argno < arg_end; argno++)
    {
      fprintf (asm_out_file, ", %s%%out_arg%d", open, argno);
      open = "";
    }
  if (decl && DECL_STATIC_CHAIN (decl))
    {
      fprintf (asm_out_file, ", %s%s", open,
	       reg_names [OUTGOING_STATIC_CHAIN_REGNUM]);
      open = "";
    }
  if (!open[0])
    fprintf (asm_out_file, ")");

  if (needs_tgt)
    {
      fprintf (asm_out_file, ", ");
      assemble_name (asm_out_file, buf);
    }
  fprintf (asm_out_file, ";\n");

  return result != NULL_RTX ? "\tld.param%t0\t%0, [%%retval_in];\n\t}" : "}";
}
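/* Sketch of the output for a direct SImode call "b = foo (a)" (register
   numbers are illustrative):

	{
		.param.u32 %retval_in;
		.param.u32 %out_arg1;
		st.param.u32 [%out_arg1], %r26;
		call (%retval_in), foo, (%out_arg1);
		ld.param.u32	%r27, [%retval_in];
	}
*/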
/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */

static bool
nvptx_print_operand_punct_valid_p (unsigned char c)
{
  return c == '.' || c == '#';
}
static void nvptx_print_operand (FILE *, rtx, int);

/* Subroutine of nvptx_print_operand; used to print a memory reference X
   to FILE.  */

static void
nvptx_print_address_operand (FILE *file, rtx x, machine_mode)
{
  rtx off;
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);
  switch (GET_CODE (x))
    {
    case PLUS:
      off = XEXP (x, 1);
      output_address (VOIDmode, XEXP (x, 0));
      fprintf (file, "+");
      output_address (VOIDmode, off);
      break;

    case SYMBOL_REF:
    case LABEL_REF:
      output_addr_const (file, x);
      break;

    default:
      gcc_assert (GET_CODE (x) != MEM);
      nvptx_print_operand (file, x, 0);
      break;
    }
}

/* Write assembly language output for the address ADDR to FILE.  */

static void
nvptx_print_operand_address (FILE *file, machine_mode mode, rtx addr)
{
  nvptx_print_address_operand (file, addr, mode);
}
/* Print an operand, X, to FILE, with an optional modifier in CODE.

   Meaning of CODE:
   . -- print the predicate for the instruction or an empty string for an
	unconditional one.
   # -- print a rounding mode for the instruction

   A -- print a data area for a MEM
   c -- print an opcode suffix for a comparison operator, including a type code
   D -- print a data area for a MEM operand
   S -- print a shuffle kind specified by CONST_INT
   t -- print a type opcode suffix, promoting QImode to 32 bits
   T -- print a type size in bits
   u -- print a type opcode suffix without promotions.  */
1958 static void
1959 nvptx_print_operand (FILE *file, rtx x, int code)
1961 if (code == '.')
1963 x = current_insn_predicate;
1964 if (x)
1966 unsigned int regno = REGNO (XEXP (x, 0));
1967 fputs ("[", file);
1968 if (GET_CODE (x) == EQ)
1969 fputs ("!", file);
1970 fputs (reg_names [regno], file);
1971 fputs ("]", file);
1973 return;
1975 else if (code == '#')
1977 fputs (".rn", file);
1978 return;
1981 enum rtx_code x_code = GET_CODE (x);
1982 machine_mode mode = GET_MODE (x);
1984 switch (code)
1986 case 'A':
1987 x = XEXP (x, 0);
1988 /* FALLTHROUGH. */
1990 case 'D':
1991 if (GET_CODE (x) == CONST)
1992 x = XEXP (x, 0);
1993 if (GET_CODE (x) == PLUS)
1994 x = XEXP (x, 0);
1996 if (GET_CODE (x) == SYMBOL_REF)
1997 fputs (section_for_sym (x), file);
1998 break;
2000 case 't':
2001 case 'u':
2002 if (x_code == SUBREG)
2004 mode = GET_MODE (SUBREG_REG (x));
2005 if (mode == TImode)
2006 mode = DImode;
2007 else if (COMPLEX_MODE_P (mode))
2008 mode = GET_MODE_INNER (mode);
2010 fprintf (file, "%s", nvptx_ptx_type_from_mode (mode, code == 't'));
2011 break;
2013 case 'S':
2015 nvptx_shuffle_kind kind = (nvptx_shuffle_kind) UINTVAL (x);
2016 /* Same order as nvptx_shuffle_kind. */
2017 static const char *const kinds[] =
2018 {".up", ".down", ".bfly", ".idx"};
2019 fputs (kinds[kind], file);
2021 break;
2023 case 'T':
2024 fprintf (file, "%d", GET_MODE_BITSIZE (mode));
2025 break;
2027 case 'j':
2028 fprintf (file, "@");
2029 goto common;
2031 case 'J':
2032 fprintf (file, "@!");
2033 goto common;
2035 case 'c':
2036 mode = GET_MODE (XEXP (x, 0));
2037 switch (x_code)
2039 case EQ:
2040 fputs (".eq", file);
2041 break;
2042 case NE:
2043 if (FLOAT_MODE_P (mode))
2044 fputs (".neu", file);
2045 else
2046 fputs (".ne", file);
2047 break;
2048 case LE:
2049 fputs (".le", file);
2050 break;
2051 case GE:
2052 fputs (".ge", file);
2053 break;
2054 case LT:
2055 fputs (".lt", file);
2056 break;
2057 case GT:
2058 fputs (".gt", file);
2059 break;
2060 case LEU:
2061 fputs (".ls", file);
2062 break;
2063 case GEU:
2064 fputs (".hs", file);
2065 break;
2066 case LTU:
2067 fputs (".lo", file);
2068 break;
2069 case GTU:
2070 fputs (".hi", file);
2071 break;
2072 case LTGT:
2073 fputs (".ne", file);
2074 break;
2075 case UNEQ:
2076 fputs (".equ", file);
2077 break;
2078 case UNLE:
2079 fputs (".leu", file);
2080 break;
2081 case UNGE:
2082 fputs (".geu", file);
2083 break;
2084 case UNLT:
2085 fputs (".ltu", file);
2086 break;
2087 case UNGT:
2088 fputs (".gtu", file);
2089 break;
2090 case UNORDERED:
2091 fputs (".nan", file);
2092 break;
2093 case ORDERED:
2094 fputs (".num", file);
2095 break;
2096 default:
2097 gcc_unreachable ();
2099 if (FLOAT_MODE_P (mode)
2100 || x_code == EQ || x_code == NE
2101 || x_code == GEU || x_code == GTU
2102 || x_code == LEU || x_code == LTU)
2103 fputs (nvptx_ptx_type_from_mode (mode, true), file);
2104 else
2105 fprintf (file, ".s%d", GET_MODE_BITSIZE (mode));
2106 break;
2107 default:
2108 common:
2109 switch (x_code)
2111 case SUBREG:
2113 rtx inner_x = SUBREG_REG (x);
2114 machine_mode inner_mode = GET_MODE (inner_x);
2115 machine_mode split = maybe_split_mode (inner_mode);
2117 if (split != VOIDmode
2118 && (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode)))
2119 output_reg (file, REGNO (inner_x), split);
2120 else
2121 output_reg (file, REGNO (inner_x), split, SUBREG_BYTE (x));
2123 break;
2125 case REG:
2126 output_reg (file, REGNO (x), maybe_split_mode (mode));
2127 break;
2129 case MEM:
2130 fputc ('[', file);
2131 nvptx_print_address_operand (file, XEXP (x, 0), mode);
2132 fputc (']', file);
2133 break;
2135 case CONST_INT:
2136 output_addr_const (file, x);
2137 break;
2139 case CONST:
2140 case SYMBOL_REF:
2141 case LABEL_REF:
2142 /* We could use output_addr_const, but that can print things like
2143 "x-8", which breaks ptxas. Need to ensure it is output as
2144 "x+-8". */
2145 nvptx_print_address_operand (file, x, VOIDmode);
2146 break;
2148 case CONST_DOUBLE:
2149 long vals[2];
2150 real_to_target (vals, CONST_DOUBLE_REAL_VALUE (x), mode);
2151 vals[0] &= 0xffffffff;
2152 vals[1] &= 0xffffffff;
2153 if (mode == SFmode)
2154 fprintf (file, "0f%08lx", vals[0]);
2155 else
2156 fprintf (file, "0d%08lx%08lx", vals[1], vals[0]);
2157 break;
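/* E.g. (illustrative) SFmode 1.0 prints as "0f3f800000" and DFmode
   1.0 as "0d3ff0000000000000" -- the hex-float spellings ptxas
   expects. */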
2159 default:
2160 output_addr_const (file, x);
2165 /* Record replacement regs used to deal with subreg operands. */
2166 struct reg_replace
2168 rtx replacement[MAX_RECOG_OPERANDS];
2169 machine_mode mode;
2170 int n_allocated;
2171 int n_in_use;
2174 /* Allocate or reuse a replacement in R and return the rtx. */
2176 static rtx
2177 get_replacement (struct reg_replace *r)
2179 if (r->n_allocated == r->n_in_use)
2180 r->replacement[r->n_allocated++] = gen_reg_rtx (r->mode);
2181 return r->replacement[r->n_in_use++];
2184 /* Clean up subreg operands. In ptx assembly, everything is typed, and
2185 the presence of subregs would break the rules for most instructions.
2186 Replace them with a suitable new register of the right size, plus
2187 conversion copyin/copyout instructions. */
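/* For example (a sketch of the transformation; register numbers are
   illustrative): an input operand (subreg:DI (reg:SI 100) 0) is
   replaced by a fresh DImode register, preceded by
     (set (reg:DI 101) (zero_extend:DI (reg:SI 100)))
   while an output operand gets the inverse copy,
     (set (reg:SI 100) (truncate:SI (reg:DI 101)))
   emitted after the insn. */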
2189 static void
2190 nvptx_reorg_subreg (void)
2192 struct reg_replace qiregs, hiregs, siregs, diregs;
2193 rtx_insn *insn, *next;
2195 qiregs.n_allocated = 0;
2196 hiregs.n_allocated = 0;
2197 siregs.n_allocated = 0;
2198 diregs.n_allocated = 0;
2199 qiregs.mode = QImode;
2200 hiregs.mode = HImode;
2201 siregs.mode = SImode;
2202 diregs.mode = DImode;
2204 for (insn = get_insns (); insn; insn = next)
2206 next = NEXT_INSN (insn);
2207 if (!NONDEBUG_INSN_P (insn)
2208 || asm_noperands (PATTERN (insn)) >= 0
2209 || GET_CODE (PATTERN (insn)) == USE
2210 || GET_CODE (PATTERN (insn)) == CLOBBER)
2211 continue;
2213 qiregs.n_in_use = 0;
2214 hiregs.n_in_use = 0;
2215 siregs.n_in_use = 0;
2216 diregs.n_in_use = 0;
2217 extract_insn (insn);
2218 enum attr_subregs_ok s_ok = get_attr_subregs_ok (insn);
2220 for (int i = 0; i < recog_data.n_operands; i++)
2222 rtx op = recog_data.operand[i];
2223 if (GET_CODE (op) != SUBREG)
2224 continue;
2226 rtx inner = SUBREG_REG (op);
2228 machine_mode outer_mode = GET_MODE (op);
2229 machine_mode inner_mode = GET_MODE (inner);
2230 gcc_assert (s_ok);
2231 if (s_ok
2232 && (GET_MODE_PRECISION (inner_mode)
2233 >= GET_MODE_PRECISION (outer_mode)))
2234 continue;
2235 gcc_assert (SCALAR_INT_MODE_P (outer_mode));
2236 struct reg_replace *r = (outer_mode == QImode ? &qiregs
2237 : outer_mode == HImode ? &hiregs
2238 : outer_mode == SImode ? &siregs
2239 : &diregs);
2240 rtx new_reg = get_replacement (r);
2242 if (recog_data.operand_type[i] != OP_OUT)
2244 enum rtx_code code;
2245 if (GET_MODE_PRECISION (inner_mode)
2246 < GET_MODE_PRECISION (outer_mode))
2247 code = ZERO_EXTEND;
2248 else
2249 code = TRUNCATE;
2251 rtx pat = gen_rtx_SET (new_reg,
2252 gen_rtx_fmt_e (code, outer_mode, inner));
2253 emit_insn_before (pat, insn);
2256 if (recog_data.operand_type[i] != OP_IN)
2258 enum rtx_code code;
2259 if (GET_MODE_PRECISION (inner_mode)
2260 < GET_MODE_PRECISION (outer_mode))
2261 code = TRUNCATE;
2262 else
2263 code = ZERO_EXTEND;
2265 rtx pat = gen_rtx_SET (inner,
2266 gen_rtx_fmt_e (code, inner_mode, new_reg));
2267 emit_insn_after (pat, insn);
2269 validate_change (insn, recog_data.operand_loc[i], new_reg, false);
2274 /* Loop structure of the function. The entire function is described as
2275 a NULL loop. */
2277 struct parallel
2279 /* Parent parallel. */
2280 parallel *parent;
2282 /* Next sibling parallel. */
2283 parallel *next;
2285 /* First child parallel. */
2286 parallel *inner;
2288 /* Partitioning mask of the parallel. */
2289 unsigned mask;
2291 /* Partitioning used within inner parallels. */
2292 unsigned inner_mask;
2294 /* Location of parallel forked and join. The forked is the first
2295 block in the parallel and the join is the first block after
2296 the partition. */
2297 basic_block forked_block;
2298 basic_block join_block;
2300 rtx_insn *forked_insn;
2301 rtx_insn *join_insn;
2303 rtx_insn *fork_insn;
2304 rtx_insn *joining_insn;
2306 /* Basic blocks in this parallel, but not in child parallels. The
2307 FORKED and JOINING blocks are in the partition. The FORK and JOIN
2308 blocks are not. */
2309 auto_vec<basic_block> blocks;
2311 public:
2312 parallel (parallel *parent, unsigned mask);
2313 ~parallel ();
2316 /* Constructor links the new parallel into its parent's chain of
2317 children. */
2319 parallel::parallel (parallel *parent_, unsigned mask_)
2320 :parent (parent_), next (0), inner (0), mask (mask_), inner_mask (0)
2322 forked_block = join_block = 0;
2323 forked_insn = join_insn = 0;
2324 fork_insn = joining_insn = 0;
2326 if (parent)
2328 next = parent->inner;
2329 parent->inner = this;
2333 parallel::~parallel ()
2335 delete inner;
2336 delete next;
2339 /* Map of basic blocks to insns. */
2340 typedef hash_map<basic_block, rtx_insn *> bb_insn_map_t;
2342 /* A tuple of an insn of interest and the BB in which it resides. */
2343 typedef std::pair<rtx_insn *, basic_block> insn_bb_t;
2344 typedef auto_vec<insn_bb_t> insn_bb_vec_t;
2346 /* Split basic blocks so that each forked and join unspec is at
2347 the start of its basic block. Thus afterwards each block will
2348 have a single partitioning mode. We also do the same for return
2349 insns, as they are executed by every thread. Populate MAP with
2351 head and tail blocks. We also clear the BB visited flag, which is
2352 used when finding partitions. */
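/* As an illustration (a sketch, not emitted text): a block of the form
     insn_a; nvptx_forked; insn_b;
   is split immediately before the forked unspec, so that nvptx_forked
   becomes the first insn of a new block and is recorded in MAP as that
   block's head. */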
2354 static void
2355 nvptx_split_blocks (bb_insn_map_t *map)
2357 insn_bb_vec_t worklist;
2358 basic_block block;
2359 rtx_insn *insn;
2361 /* Locate all the reorg instructions of interest. */
2362 FOR_ALL_BB_FN (block, cfun)
2364 bool seen_insn = false;
2366 /* Clear visited flag, for use by parallel locator. */
2367 block->flags &= ~BB_VISITED;
2369 FOR_BB_INSNS (block, insn)
2371 if (!INSN_P (insn))
2372 continue;
2373 switch (recog_memoized (insn))
2375 default:
2376 seen_insn = true;
2377 continue;
2378 case CODE_FOR_nvptx_forked:
2379 case CODE_FOR_nvptx_join:
2380 break;
2382 case CODE_FOR_return:
2383 /* We also need to split just before return insns, as
2384 that insn needs executing by all threads, but the
2385 block it is in probably does not. */
2386 break;
2389 if (seen_insn)
2390 /* We've found an instruction that must be at the start of
2391 a block, but isn't. Add it to the worklist. */
2392 worklist.safe_push (insn_bb_t (insn, block));
2393 else
2394 /* It was already the first instruction. Just add it to
2395 the map. */
2396 map->get_or_insert (block) = insn;
2397 seen_insn = true;
2401 /* Split blocks on the worklist. */
2402 unsigned ix;
2403 insn_bb_t *elt;
2404 basic_block remap = 0;
2405 for (ix = 0; worklist.iterate (ix, &elt); ix++)
2407 if (remap != elt->second)
2409 block = elt->second;
2410 remap = block;
2413 /* Split block before insn. The insn is in the new block. */
2414 edge e = split_block (block, PREV_INSN (elt->first));
2416 block = e->dest;
2417 map->get_or_insert (block) = elt->first;
2421 /* BLOCK is a basic block containing a head or tail instruction.
2422 Locate the associated prehead or pretail instruction, which must be
2423 in the single predecessor block. */
2425 static rtx_insn *
2426 nvptx_discover_pre (basic_block block, int expected)
2428 gcc_assert (block->preds->length () == 1);
2429 basic_block pre_block = (*block->preds)[0]->src;
2430 rtx_insn *pre_insn;
2432 for (pre_insn = BB_END (pre_block); !INSN_P (pre_insn);
2433 pre_insn = PREV_INSN (pre_insn))
2434 gcc_assert (pre_insn != BB_HEAD (pre_block));
2436 gcc_assert (recog_memoized (pre_insn) == expected);
2437 return pre_insn;
2440 /* Dump this parallel and all its inner parallels. */
2442 static void
2443 nvptx_dump_pars (parallel *par, unsigned depth)
2445 fprintf (dump_file, "%u: mask %d head=%d, tail=%d\n",
2446 depth, par->mask,
2447 par->forked_block ? par->forked_block->index : -1,
2448 par->join_block ? par->join_block->index : -1);
2450 fprintf (dump_file, " blocks:");
2452 basic_block block;
2453 for (unsigned ix = 0; par->blocks.iterate (ix, &block); ix++)
2454 fprintf (dump_file, " %d", block->index);
2455 fprintf (dump_file, "\n");
2456 if (par->inner)
2457 nvptx_dump_pars (par->inner, depth + 1);
2459 if (par->next)
2460 nvptx_dump_pars (par->next, depth);
2463 /* If BLOCK contains a fork/join marker, process it to create or
2464 terminate a loop structure. Add this block to the current loop,
2465 and then walk successor blocks. */
2467 static parallel *
2468 nvptx_find_par (bb_insn_map_t *map, parallel *par, basic_block block)
2470 if (block->flags & BB_VISITED)
2471 return par;
2472 block->flags |= BB_VISITED;
2474 if (rtx_insn **endp = map->get (block))
2476 rtx_insn *end = *endp;
2478 /* This is a block head or tail, or return instruction. */
2479 switch (recog_memoized (end))
2481 case CODE_FOR_return:
2482 /* Return instructions are in their own block, and we
2483 don't need to do anything more. */
2484 return par;
2486 case CODE_FOR_nvptx_forked:
2487 /* Loop head, create a new inner loop and add it into
2488 our parent's child list. */
2490 unsigned mask = UINTVAL (XVECEXP (PATTERN (end), 0, 0));
2492 gcc_assert (mask);
2493 par = new parallel (par, mask);
2494 par->forked_block = block;
2495 par->forked_insn = end;
2496 if (!(mask & GOMP_DIM_MASK (GOMP_DIM_MAX))
2497 && (mask & GOMP_DIM_MASK (GOMP_DIM_WORKER)))
2498 par->fork_insn
2499 = nvptx_discover_pre (block, CODE_FOR_nvptx_fork);
2501 break;
2503 case CODE_FOR_nvptx_join:
2504 /* A loop tail. Finish the current loop and return to
2505 parent. */
2507 unsigned mask = UINTVAL (XVECEXP (PATTERN (end), 0, 0));
2509 gcc_assert (par->mask == mask);
2510 par->join_block = block;
2511 par->join_insn = end;
2512 if (!(mask & GOMP_DIM_MASK (GOMP_DIM_MAX))
2513 && (mask & GOMP_DIM_MASK (GOMP_DIM_WORKER)))
2514 par->joining_insn
2515 = nvptx_discover_pre (block, CODE_FOR_nvptx_joining);
2516 par = par->parent;
2518 break;
2520 default:
2521 gcc_unreachable ();
2525 if (par)
2526 /* Add this block onto the current loop's list of blocks. */
2527 par->blocks.safe_push (block);
2528 else
2529 /* This must be the entry block. Create a NULL parallel. */
2530 par = new parallel (0, 0);
2532 /* Walk successor blocks. */
2533 edge e;
2534 edge_iterator ei;
2536 FOR_EACH_EDGE (e, ei, block->succs)
2537 nvptx_find_par (map, par, e->dest);
2539 return par;
2542 /* DFS walk the CFG looking for fork & join markers. Construct
2543 loop structures as we go. MAP is a mapping of basic blocks
2544 to head & tail markers, discovered when splitting blocks. This
2545 speeds up the discovery. We rely on the BB visited flag having
2546 been cleared when splitting blocks. */
2548 static parallel *
2549 nvptx_discover_pars (bb_insn_map_t *map)
2551 basic_block block;
2553 /* Mark exit blocks as visited. */
2554 block = EXIT_BLOCK_PTR_FOR_FN (cfun);
2555 block->flags |= BB_VISITED;
2557 /* And entry block as not. */
2558 block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2559 block->flags &= ~BB_VISITED;
2561 parallel *par = nvptx_find_par (map, 0, block);
2563 if (dump_file)
2565 fprintf (dump_file, "\nLoops\n");
2566 nvptx_dump_pars (par, 0);
2567 fprintf (dump_file, "\n");
2570 return par;
2573 /* Analyse a group of BBs within a partitioned region and create N
2574 Single-Entry-Single-Exit regions. Some of those regions will be
2575 trivial ones consisting of a single BB. The blocks of a
2576 partitioned region might form a set of disjoint graphs -- because
2577 the region encloses a differently partitioned sub-region.
2579 We use the linear time algorithm described in 'Finding Regions Fast:
2580 Single Entry Single Exit and control Regions in Linear Time'
2581 Johnson, Pearson & Pingali. That algorithm deals with complete
2582 CFGs, where a back edge is inserted from END to START, and thus the
2583 problem becomes one of finding equivalent loops.
2585 In this case we have a partial CFG. We complete it by redirecting
2586 any incoming edge to the graph to be from an arbitrary external BB,
2587 and similarly redirecting any outgoing edge to be to that BB.
2588 Thus we end up with a closed graph.
2590 The algorithm works by building a spanning tree of an undirected
2591 graph and keeping track of back edges from nodes further from the
2592 root in the tree to nodes nearer to the root in the tree. In the
2593 description below, the root is up and the tree grows downwards.
2595 We avoid having to deal with degenerate back-edges to the same
2596 block, by splitting each BB into 3 -- one for input edges, one for
2597 the node itself and one for the output edges. Such back edges are
2598 referred to as 'Brackets'. Cycle equivalent nodes will have the
2599 same set of brackets.
2601 Determining bracket equivalency is done by maintaining a list of
2602 brackets in such a manner that the list length and final bracket
2603 uniquely identify the set.
2605 We use coloring to mark all BBs with cycle equivalency with the
2606 same color. This is the output of the 'Finding Regions Fast'
2607 algorithm. Notice it doesn't actually find the set of nodes within
2608 a particular region, just unordered sets of nodes that are the
2609 entries and exits of SESE regions.
2611 After determining cycle equivalency, we need to find the minimal
2612 set of SESE regions. Do this with a DFS coloring walk of the
2613 complete graph. We're either 'looking' or 'coloring'. When
2614 looking, and we're in the subgraph, we start coloring with the color of
2615 the current node, and remember that node as the start of the
2616 current color's SESE region. Every time we go to a new node, we
2617 decrement the count of nodes with that color. If it reaches zero,
2618 we remember that node as the end of the current color's SESE region
2619 and return to 'looking'. Otherwise we color the node the current
2620 color.
2622 This way we end up with coloring the inside of non-trivial SESE
2623 regions with the color of that region. */
2625 /* A pair of BBs. We use this to represent SESE regions. */
2626 typedef std::pair<basic_block, basic_block> bb_pair_t;
2627 typedef auto_vec<bb_pair_t> bb_pair_vec_t;
2629 /* A node in the undirected CFG. The discriminator SECOND indicates just
2630 above or just below the BB indicated by FIRST. */
2631 typedef std::pair<basic_block, int> pseudo_node_t;
2633 /* A bracket indicates an edge towards the root of the spanning tree of the
2634 undirected graph. Each bracket has a color, determined
2635 from the current set of brackets. */
2636 struct bracket
2638 pseudo_node_t back; /* Back target. */
2640 /* Current color and size of set. */
2641 unsigned color;
2642 unsigned size;
2644 bracket (pseudo_node_t back_)
2645 : back (back_), color (~0u), size (~0u)
2649 unsigned get_color (auto_vec<unsigned> &color_counts, unsigned length)
2651 if (length != size)
2653 size = length;
2654 color = color_counts.length ();
2655 color_counts.quick_push (0);
2657 color_counts[color]++;
2658 return color;
2662 typedef auto_vec<bracket> bracket_vec_t;
2664 /* Basic block info for finding SESE regions. */
2666 struct bb_sese
2668 int node; /* Node number in spanning tree. */
2669 int parent; /* Parent node number. */
2671 /* The algorithm splits each node A into Ai, A', Ao. The incoming
2672 edges arrive at pseudo-node Ai and the outgoing edges leave at
2673 pseudo-node Ao. We have to remember which way we arrived at a
2674 particular node when generating the spanning tree. dir > 0 means
2675 we arrived at Ai, dir < 0 means we arrived at Ao. */
2676 int dir;
2678 /* Lowest numbered pseudo-node reached via a backedge from this
2679 node, or any descendant. */
2680 pseudo_node_t high;
2682 int color; /* Cycle-equivalence color. */
2684 /* Stack of brackets for this node. */
2685 bracket_vec_t brackets;
2687 bb_sese (unsigned node_, unsigned p, int dir_)
2688 :node (node_), parent (p), dir (dir_)
2691 ~bb_sese ();
2693 /* Push a bracket ending at BACK. */
2694 void push (const pseudo_node_t &back)
2696 if (dump_file)
2697 fprintf (dump_file, "Pushing backedge %d:%+d\n",
2698 back.first ? back.first->index : 0, back.second);
2699 brackets.safe_push (bracket (back));
2702 void append (bb_sese *child);
2703 void remove (const pseudo_node_t &);
2705 /* Set node's color. */
2706 void set_color (auto_vec<unsigned> &color_counts)
2708 color = brackets.last ().get_color (color_counts, brackets.length ());
2712 bb_sese::~bb_sese ()
2716 /* Destructively append CHILD's brackets. */
2718 void
2719 bb_sese::append (bb_sese *child)
2721 if (int len = child->brackets.length ())
2723 int ix;
2725 if (dump_file)
2727 for (ix = 0; ix < len; ix++)
2729 const pseudo_node_t &pseudo = child->brackets[ix].back;
2730 fprintf (dump_file, "Appending (%d)'s backedge %d:%+d\n",
2731 child->node, pseudo.first ? pseudo.first->index : 0,
2732 pseudo.second);
2735 if (!brackets.length ())
2736 std::swap (brackets, child->brackets);
2737 else
2739 brackets.reserve (len);
2740 for (ix = 0; ix < len; ix++)
2741 brackets.quick_push (child->brackets[ix]);
2746 /* Remove brackets that terminate at PSEUDO. */
2748 void
2749 bb_sese::remove (const pseudo_node_t &pseudo)
2751 unsigned removed = 0;
2752 int len = brackets.length ();
2754 for (int ix = 0; ix < len; ix++)
2756 if (brackets[ix].back == pseudo)
2758 if (dump_file)
2759 fprintf (dump_file, "Removing backedge %d:%+d\n",
2760 pseudo.first ? pseudo.first->index : 0, pseudo.second);
2761 removed++;
2763 else if (removed)
2764 brackets[ix-removed] = brackets[ix];
2766 while (removed--)
2767 brackets.pop ();
2770 /* Accessors for BB's aux pointer. */
2771 #define BB_SET_SESE(B, S) ((B)->aux = (S))
2772 #define BB_GET_SESE(B) ((bb_sese *)(B)->aux)
2774 /* DFS walk creating SESE data structures. Only cover nodes with
2775 BB_VISITED set. Append discovered blocks to LIST. We number in
2776 increments of 3 so that the above and below pseudo nodes can be
2777 implicitly numbered too. */
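/* So (illustrative) a block numbered N owns pseudo-nodes N-1 and N+1;
   which of the two is 'above' depends on the direction in which the
   walk arrived at the block. */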
2779 static int
2780 nvptx_sese_number (int n, int p, int dir, basic_block b,
2781 auto_vec<basic_block> *list)
2783 if (BB_GET_SESE (b))
2784 return n;
2786 if (dump_file)
2787 fprintf (dump_file, "Block %d(%d), parent (%d), orientation %+d\n",
2788 b->index, n, p, dir);
2790 BB_SET_SESE (b, new bb_sese (n, p, dir));
2791 p = n;
2793 n += 3;
2794 list->quick_push (b);
2796 /* First walk the nodes on the 'other side' of this node, then walk
2797 the nodes on the same side. */
2798 for (unsigned ix = 2; ix; ix--)
2800 vec<edge, va_gc> *edges = dir > 0 ? b->succs : b->preds;
2801 size_t offset = (dir > 0 ? offsetof (edge_def, dest)
2802 : offsetof (edge_def, src));
2803 edge e;
2804 edge_iterator ei;
2806 FOR_EACH_EDGE (e, ei, edges)
2808 basic_block target = *(basic_block *)((char *)e + offset);
2810 if (target->flags & BB_VISITED)
2811 n = nvptx_sese_number (n, p, dir, target, list);
2813 dir = -dir;
2815 return n;
2818 /* Process pseudo node above (DIR < 0) or below (DIR > 0) ME.
2819 EDGES are the outgoing edges and OFFSET is the offset to the src
2820 or dst block on the edges. */
2822 static void
2823 nvptx_sese_pseudo (basic_block me, bb_sese *sese, int depth, int dir,
2824 vec<edge, va_gc> *edges, size_t offset)
2826 edge e;
2827 edge_iterator ei;
2828 int hi_back = depth;
2829 pseudo_node_t node_back (0, depth);
2830 int hi_child = depth;
2831 pseudo_node_t node_child (0, depth);
2832 basic_block child = NULL;
2833 unsigned num_children = 0;
2834 int usd = -dir * sese->dir;
2836 if (dump_file)
2837 fprintf (dump_file, "\nProcessing %d(%d) %+d\n",
2838 me->index, sese->node, dir);
2840 if (dir < 0)
2842 /* This is the above pseudo-child. It has the BB itself as an
2843 additional child node. */
2844 node_child = sese->high;
2845 hi_child = node_child.second;
2846 if (node_child.first)
2847 hi_child += BB_GET_SESE (node_child.first)->node;
2848 num_children++;
2851 /* Examine each edge.
2852 - if it is a child (a) append its bracket list and (b) record
2853 whether it is the child with the highest reaching bracket.
2854 - if it is an edge to ancestor, record whether it's the highest
2855 reaching backlink. */
2856 FOR_EACH_EDGE (e, ei, edges)
2858 basic_block target = *(basic_block *)((char *)e + offset);
2860 if (bb_sese *t_sese = BB_GET_SESE (target))
2862 if (t_sese->parent == sese->node && !(t_sese->dir + usd))
2864 /* Child node. Append its bracket list. */
2865 num_children++;
2866 sese->append (t_sese);
2868 /* Compare its hi value. */
2869 int t_hi = t_sese->high.second;
2871 if (basic_block child_hi_block = t_sese->high.first)
2872 t_hi += BB_GET_SESE (child_hi_block)->node;
2874 if (hi_child > t_hi)
2876 hi_child = t_hi;
2877 node_child = t_sese->high;
2878 child = target;
2881 else if (t_sese->node < sese->node + dir
2882 && !(dir < 0 && sese->parent == t_sese->node))
2884 /* Non-parental ancestor node -- a backlink. */
2885 int d = usd * t_sese->dir;
2886 int back = t_sese->node + d;
2888 if (hi_back > back)
2890 hi_back = back;
2891 node_back = pseudo_node_t (target, d);
2895 else
2896 { /* Fallen off graph, backlink to entry node. */
2897 hi_back = 0;
2898 node_back = pseudo_node_t (0, 0);
2902 /* Remove any brackets that terminate at this pseudo node. */
2903 sese->remove (pseudo_node_t (me, dir));
2905 /* Now push any backlinks from this pseudo node. */
2906 FOR_EACH_EDGE (e, ei, edges)
2908 basic_block target = *(basic_block *)((char *)e + offset);
2909 if (bb_sese *t_sese = BB_GET_SESE (target))
2911 if (t_sese->node < sese->node + dir
2912 && !(dir < 0 && sese->parent == t_sese->node))
2913 /* Non-parental ancestor node - backedge from me. */
2914 sese->push (pseudo_node_t (target, usd * t_sese->dir));
2916 else
2918 /* Back edge to entry node. */
2919 sese->push (pseudo_node_t (0, 0));
2923 /* If this node leads directly or indirectly to a no-return region of
2924 the graph, then fake a backedge to entry node. */
2925 if (!sese->brackets.length () || !edges || !edges->length ())
2927 hi_back = 0;
2928 node_back = pseudo_node_t (0, 0);
2929 sese->push (node_back);
2932 /* Record the highest reaching backedge from us or a descendant. */
2933 sese->high = hi_back < hi_child ? node_back : node_child;
2935 if (num_children > 1)
2937 /* There is more than one child -- this is a Y shaped piece of
2938 spanning tree. We have to insert a fake backedge from this
2939 node to the highest ancestor reached by not-the-highest
2940 reaching child. Note that there may be multiple children
2941 with backedges to the same highest node. That's ok and we
2942 insert the edge to that highest node. */
2943 hi_child = depth;
2944 if (dir < 0 && child)
2946 node_child = sese->high;
2947 hi_child = node_child.second;
2948 if (node_child.first)
2949 hi_child += BB_GET_SESE (node_child.first)->node;
2952 FOR_EACH_EDGE (e, ei, edges)
2954 basic_block target = *(basic_block *)((char *)e + offset);
2956 if (target == child)
2957 /* Ignore the highest child. */
2958 continue;
2960 bb_sese *t_sese = BB_GET_SESE (target);
2961 if (!t_sese)
2962 continue;
2963 if (t_sese->parent != sese->node)
2964 /* Not a child. */
2965 continue;
2967 /* Compare its hi value. */
2968 int t_hi = t_sese->high.second;
2970 if (basic_block child_hi_block = t_sese->high.first)
2971 t_hi += BB_GET_SESE (child_hi_block)->node;
2973 if (hi_child > t_hi)
2975 hi_child = t_hi;
2976 node_child = t_sese->high;
2980 sese->push (node_child);
2985 /* DFS walk of BB graph. Color node BLOCK according to COLORING then
2986 proceed to successors. Set SESE entry and exit nodes of
2987 REGIONS. */
2989 static void
2990 nvptx_sese_color (auto_vec<unsigned> &color_counts, bb_pair_vec_t &regions,
2991 basic_block block, int coloring)
2993 bb_sese *sese = BB_GET_SESE (block);
2995 if (block->flags & BB_VISITED)
2997 /* If we've already encountered this block, either we must not
2998 be coloring, or it must have been colored the current color. */
2999 gcc_assert (coloring < 0 || (sese && coloring == sese->color));
3000 return;
3003 block->flags |= BB_VISITED;
3005 if (sese)
3007 if (coloring < 0)
3009 /* Start coloring a region. */
3010 regions[sese->color].first = block;
3011 coloring = sese->color;
3014 if (!--color_counts[sese->color] && sese->color == coloring)
3016 /* Found final block of SESE region. */
3017 regions[sese->color].second = block;
3018 coloring = -1;
3020 else
3021 /* Color the node, so we can assert on revisiting the node
3022 that the graph is indeed SESE. */
3023 sese->color = coloring;
3025 else
3026 /* Fallen off the subgraph, we cannot be coloring. */
3027 gcc_assert (coloring < 0);
3029 /* Walk each successor block. */
3030 if (block->succs && block->succs->length ())
3032 edge e;
3033 edge_iterator ei;
3035 FOR_EACH_EDGE (e, ei, block->succs)
3036 nvptx_sese_color (color_counts, regions, e->dest, coloring);
3038 else
3039 gcc_assert (coloring < 0);
3042 /* Find minimal set of SESE regions covering BLOCKS. REGIONS might
3043 end up with NULL entries in it. */
3045 static void
3046 nvptx_find_sese (auto_vec<basic_block> &blocks, bb_pair_vec_t &regions)
3048 basic_block block;
3049 int ix;
3051 /* First clear each BB of the whole function. */
3052 FOR_EACH_BB_FN (block, cfun)
3054 block->flags &= ~BB_VISITED;
3055 BB_SET_SESE (block, 0);
3057 block = EXIT_BLOCK_PTR_FOR_FN (cfun);
3058 block->flags &= ~BB_VISITED;
3059 BB_SET_SESE (block, 0);
3060 block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3061 block->flags &= ~BB_VISITED;
3062 BB_SET_SESE (block, 0);
3064 /* Mark blocks in the function that are in this graph. */
3065 for (ix = 0; blocks.iterate (ix, &block); ix++)
3066 block->flags |= BB_VISITED;
3068 /* Counts of nodes assigned to each color. There cannot be more
3069 colors than blocks (and hopefully there will be fewer). */
3070 auto_vec<unsigned> color_counts;
3071 color_counts.reserve (blocks.length ());
3073 /* Worklist of nodes in the spanning tree. Again, there cannot be
3074 more nodes in the tree than blocks (there will be fewer if the
3075 CFG of blocks is disjoint). */
3076 auto_vec<basic_block> spanlist;
3077 spanlist.reserve (blocks.length ());
3079 /* Make sure every block has its cycle class determined. */
3080 for (ix = 0; blocks.iterate (ix, &block); ix++)
3082 if (BB_GET_SESE (block))
3083 /* We already met this block in an earlier graph solve. */
3084 continue;
3086 if (dump_file)
3087 fprintf (dump_file, "Searching graph starting at %d\n", block->index);
3089 /* Number the nodes reachable from block in initial DFS order. */
3090 int depth = nvptx_sese_number (2, 0, +1, block, &spanlist);
3092 /* Now walk in reverse DFS order to find cycle equivalents. */
3093 while (spanlist.length ())
3095 block = spanlist.pop ();
3096 bb_sese *sese = BB_GET_SESE (block);
3098 /* Do the pseudo node below. */
3099 nvptx_sese_pseudo (block, sese, depth, +1,
3100 sese->dir > 0 ? block->succs : block->preds,
3101 (sese->dir > 0 ? offsetof (edge_def, dest)
3102 : offsetof (edge_def, src)));
3103 sese->set_color (color_counts);
3104 /* Do the pseudo node above. */
3105 nvptx_sese_pseudo (block, sese, depth, -1,
3106 sese->dir < 0 ? block->succs : block->preds,
3107 (sese->dir < 0 ? offsetof (edge_def, dest)
3108 : offsetof (edge_def, src)));
3110 if (dump_file)
3111 fprintf (dump_file, "\n");
3114 if (dump_file)
3116 unsigned count;
3117 const char *comma = "";
3119 fprintf (dump_file, "Found %d cycle equivalents\n",
3120 color_counts.length ());
3121 for (ix = 0; color_counts.iterate (ix, &count); ix++)
3123 fprintf (dump_file, "%s%d[%d]={", comma, ix, count);
3125 comma = "";
3126 for (unsigned jx = 0; blocks.iterate (jx, &block); jx++)
3127 if (BB_GET_SESE (block)->color == ix)
3129 block->flags |= BB_VISITED;
3130 fprintf (dump_file, "%s%d", comma, block->index);
3131 comma = ",";
3133 fprintf (dump_file, "}");
3134 comma = ", ";
3136 fprintf (dump_file, "\n");
3139 /* Now we've colored every block in the subgraph. We now need to
3140 determine the minimal set of SESE regions that cover that
3141 subgraph. Do this with a DFS walk of the complete function.
3142 During the walk we're either 'looking' or 'coloring'. When we
3143 reach the last node of a particular color, we stop coloring and
3144 return to looking. */
3146 /* There cannot be more SESE regions than colors. */
3147 regions.reserve (color_counts.length ());
3148 for (ix = color_counts.length (); ix--;)
3149 regions.quick_push (bb_pair_t (0, 0));
3151 for (ix = 0; blocks.iterate (ix, &block); ix++)
3152 block->flags &= ~BB_VISITED;
3154 nvptx_sese_color (color_counts, regions, ENTRY_BLOCK_PTR_FOR_FN (cfun), -1);
3156 if (dump_file)
3158 const char *comma = "";
3159 int len = regions.length ();
3161 fprintf (dump_file, "SESE regions:");
3162 for (ix = 0; ix != len; ix++)
3164 basic_block from = regions[ix].first;
3165 basic_block to = regions[ix].second;
3167 if (from)
3169 fprintf (dump_file, "%s %d{%d", comma, ix, from->index);
3170 if (to != from)
3171 fprintf (dump_file, "->%d", to->index);
3173 int color = BB_GET_SESE (from)->color;
3175 /* Print the blocks within the region (excluding ends). */
3176 FOR_EACH_BB_FN (block, cfun)
3178 bb_sese *sese = BB_GET_SESE (block);
3180 if (sese && sese->color == color
3181 && block != from && block != to)
3182 fprintf (dump_file, ".%d", block->index);
3184 fprintf (dump_file, "}");
3186 comma = ",";
3188 fprintf (dump_file, "\n\n");
3191 for (ix = 0; blocks.iterate (ix, &block); ix++)
3192 delete BB_GET_SESE (block);
3195 #undef BB_SET_SESE
3196 #undef BB_GET_SESE
3198 /* Propagate live state at the start of a partitioned region. BLOCK
3199 provides the live register information, and might not contain
3200 INSN. Propagation is inserted just after INSN. RW indicates whether
3201 we are reading and/or writing state. This separation is needed
3202 for worker-level propagation, where we essentially do a spill &
3203 fill. FN is the underlying worker function that generates the
3204 propagation instructions for a single register. DATA is user
3205 data.
3207 We propagate the live register set and the entire frame. We could
3208 do better by (a) propagating just the live set that is used within
3209 the partitioned regions and (b) only propagating stack entries that
3210 are used. The latter might be quite hard to determine. */
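/* A sketch of the frame-copy loop that nvptx_propagate emits when
   FS > 1 and RW is PM_read|PM_write (pseudo code, not emitted text):

     ptr = frame_pointer;  idx = fs;
   loop:
     idx -= 1;
     tmp = *ptr;            // PM_read: fill from the frame
     <fn (tmp, rw, ...)>;   // propagate one DImode word
     *ptr = tmp;            // PM_write: spill back
     ptr += 8;              // GET_MODE_SIZE (DImode)
     if (idx != 0) goto loop;  */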
3212 typedef rtx (*propagator_fn) (rtx, propagate_mask, unsigned, void *);
3214 static void
3215 nvptx_propagate (basic_block block, rtx_insn *insn, propagate_mask rw,
3216 propagator_fn fn, void *data)
3218 bitmap live = DF_LIVE_IN (block);
3219 bitmap_iterator iterator;
3220 unsigned ix;
3222 /* Copy the frame array. */
3223 HOST_WIDE_INT fs = get_frame_size ();
3224 if (fs)
3226 rtx tmp = gen_reg_rtx (DImode);
3227 rtx idx = NULL_RTX;
3228 rtx ptr = gen_reg_rtx (Pmode);
3229 rtx pred = NULL_RTX;
3230 rtx_code_label *label = NULL;
3232 gcc_assert (!(fs & (GET_MODE_SIZE (DImode) - 1)));
3233 fs /= GET_MODE_SIZE (DImode);
3234 /* Detect single iteration loop. */
3235 if (fs == 1)
3236 fs = 0;
3238 start_sequence ();
3239 emit_insn (gen_rtx_SET (ptr, frame_pointer_rtx));
3240 if (fs)
3242 idx = gen_reg_rtx (SImode);
3243 pred = gen_reg_rtx (BImode);
3244 label = gen_label_rtx ();
3246 emit_insn (gen_rtx_SET (idx, GEN_INT (fs)));
3247 /* Allow worker function to initialize anything needed. */
3248 rtx init = fn (tmp, PM_loop_begin, fs, data);
3249 if (init)
3250 emit_insn (init);
3251 emit_label (label);
3252 LABEL_NUSES (label)++;
3253 emit_insn (gen_addsi3 (idx, idx, GEN_INT (-1)));
3255 if (rw & PM_read)
3256 emit_insn (gen_rtx_SET (tmp, gen_rtx_MEM (DImode, ptr)));
3257 emit_insn (fn (tmp, rw, fs, data));
3258 if (rw & PM_write)
3259 emit_insn (gen_rtx_SET (gen_rtx_MEM (DImode, ptr), tmp));
3260 if (fs)
3262 emit_insn (gen_rtx_SET (pred, gen_rtx_NE (BImode, idx, const0_rtx)));
3263 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (GET_MODE_SIZE (DImode))));
3264 emit_insn (gen_br_true_uni (pred, label));
3265 rtx fini = fn (tmp, PM_loop_end, fs, data);
3266 if (fini)
3267 emit_insn (fini);
3268 emit_insn (gen_rtx_CLOBBER (GET_MODE (idx), idx));
3270 emit_insn (gen_rtx_CLOBBER (GET_MODE (tmp), tmp));
3271 emit_insn (gen_rtx_CLOBBER (GET_MODE (ptr), ptr));
3272 rtx cpy = get_insns ();
3273 end_sequence ();
3274 insn = emit_insn_after (cpy, insn);
3277 /* Copy live registers. */
3278 EXECUTE_IF_SET_IN_BITMAP (live, 0, ix, iterator)
3280 rtx reg = regno_reg_rtx[ix];
3282 if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
3284 rtx bcast = fn (reg, rw, 0, data);
3286 insn = emit_insn_after (bcast, insn);
3291 /* Worker for nvptx_vpropagate. */
3293 static rtx
3294 vprop_gen (rtx reg, propagate_mask pm,
3295 unsigned ARG_UNUSED (count), void *ARG_UNUSED (data))
3297 if (!(pm & PM_read_write))
3298 return 0;
3300 return nvptx_gen_vcast (reg);
3303 /* Propagate state that is live at start of BLOCK across the vectors
3304 of a single warp. Propagation is inserted just after INSN. */
3306 static void
3307 nvptx_vpropagate (basic_block block, rtx_insn *insn)
3309 nvptx_propagate (block, insn, PM_read_write, vprop_gen, 0);
3312 /* Worker for nvptx_wpropagate. */
3314 static rtx
3315 wprop_gen (rtx reg, propagate_mask pm, unsigned rep, void *data_)
3317 wcast_data_t *data = (wcast_data_t *)data_;
3319 if (pm & PM_loop_begin)
3321 /* Starting a loop, initialize pointer. */
3322 unsigned align = GET_MODE_ALIGNMENT (GET_MODE (reg)) / BITS_PER_UNIT;
3324 if (align > worker_bcast_align)
3325 worker_bcast_align = align;
3326 data->offset = (data->offset + align - 1) & ~(align - 1);
3328 data->ptr = gen_reg_rtx (Pmode);
3330 return gen_adddi3 (data->ptr, data->base, GEN_INT (data->offset));
3332 else if (pm & PM_loop_end)
3334 rtx clobber = gen_rtx_CLOBBER (GET_MODE (data->ptr), data->ptr);
3335 data->ptr = NULL_RTX;
3336 return clobber;
3338 else
3339 return nvptx_gen_wcast (reg, pm, rep, data);
3342 /* Spill or fill the state that is live at the start of BLOCK. PRE_P
3343 indicates if this is just before partitioned mode (do spill), or
3344 just after it starts (do fill). Sequence is inserted just after
3345 INSN. */
3347 static void
3348 nvptx_wpropagate (bool pre_p, basic_block block, rtx_insn *insn)
3350 wcast_data_t data;
3352 data.base = gen_reg_rtx (Pmode);
3353 data.offset = 0;
3354 data.ptr = NULL_RTX;
3356 nvptx_propagate (block, insn, pre_p ? PM_read : PM_write, wprop_gen, &data);
3357 if (data.offset)
3359 /* Stuff was emitted, initialize the base pointer now. */
3360 rtx init = gen_rtx_SET (data.base, worker_bcast_sym);
3361 emit_insn_after (init, insn);
3363 if (worker_bcast_size < data.offset)
3364 worker_bcast_size = data.offset;
3368 /* Emit a worker-level synchronization barrier. We use different
3369 markers for before and after synchronizations. */
3371 static rtx
3372 nvptx_wsync (bool after)
3374 return gen_nvptx_barsync (GEN_INT (after));
3377 /* Single neutering according to MASK. FROM is the incoming block and
3378 TO is the outgoing block. These may be the same block. Insert at
3379 start of FROM:
3381 if (tid.<axis>) goto end.
3383 and insert before ending branch of TO (if there is such an insn):
3385 end:
3386 <possibly-broadcast-cond>
3387 <branch>
3389 We currently only use different FROM and TO when skipping an entire
3390 loop. We could do more if we detected superblocks. */
3392 static void
3393 nvptx_single (unsigned mask, basic_block from, basic_block to)
3395 rtx_insn *head = BB_HEAD (from);
3396 rtx_insn *tail = BB_END (to);
3397 unsigned skip_mask = mask;
3399 /* Find first insn of from block. */
3400 while (head != BB_END (from) && !INSN_P (head))
3401 head = NEXT_INSN (head);
3403 /* Find last insn of to block. */
3404 rtx_insn *limit = from == to ? head : BB_HEAD (to);
3405 while (tail != limit && !INSN_P (tail) && !LABEL_P (tail))
3406 tail = PREV_INSN (tail);
3408 /* Detect if tail is a branch. */
3409 rtx tail_branch = NULL_RTX;
3410 rtx cond_branch = NULL_RTX;
3411 if (tail && INSN_P (tail))
3413 tail_branch = PATTERN (tail);
3414 if (GET_CODE (tail_branch) != SET || SET_DEST (tail_branch) != pc_rtx)
3415 tail_branch = NULL_RTX;
3416 else
3418 cond_branch = SET_SRC (tail_branch);
3419 if (GET_CODE (cond_branch) != IF_THEN_ELSE)
3420 cond_branch = NULL_RTX;
3424 if (tail == head)
3426 /* If this is empty, do nothing. */
3427 if (!head || !INSN_P (head))
3428 return;
3430 /* If this is a dummy insn, do nothing. */
3431 switch (recog_memoized (head))
3433 default:
3434 break;
3435 case CODE_FOR_nvptx_fork:
3436 case CODE_FOR_nvptx_forked:
3437 case CODE_FOR_nvptx_joining:
3438 case CODE_FOR_nvptx_join:
3439 return;
3442 if (cond_branch)
3444 /* If we're only doing vector single, there's no need to
3445 emit skip code because we'll not insert anything. */
3446 if (!(mask & GOMP_DIM_MASK (GOMP_DIM_VECTOR)))
3447 skip_mask = 0;
3449 else if (tail_branch)
3450 /* Block with only unconditional branch. Nothing to do. */
3451 return;
3454 /* Insert the vector test inside the worker test. */
3455 unsigned mode;
3456 rtx_insn *before = tail;
3457 for (mode = GOMP_DIM_WORKER; mode <= GOMP_DIM_VECTOR; mode++)
3458 if (GOMP_DIM_MASK (mode) & skip_mask)
3460 rtx_code_label *label = gen_label_rtx ();
3461 rtx pred = cfun->machine->axis_predicate[mode - GOMP_DIM_WORKER];
3463 if (!pred)
3465 pred = gen_reg_rtx (BImode);
3466 cfun->machine->axis_predicate[mode - GOMP_DIM_WORKER] = pred;
3469 rtx br;
3470 if (mode == GOMP_DIM_VECTOR)
3471 br = gen_br_true (pred, label);
3472 else
3473 br = gen_br_true_uni (pred, label);
3474 emit_insn_before (br, head);
3476 LABEL_NUSES (label)++;
3477 if (tail_branch)
3478 before = emit_label_before (label, before);
3479 else
3480 emit_label_after (label, tail);
3483 /* Now deal with propagating the branch condition. */
3484 if (cond_branch)
3486 rtx pvar = XEXP (XEXP (cond_branch, 0), 0);
3488 if (GOMP_DIM_MASK (GOMP_DIM_VECTOR) == mask)
3490 /* Vector mode only, do a shuffle. */
3491 emit_insn_before (nvptx_gen_vcast (pvar), tail);
3493 else
3495 /* Includes worker mode, do spill & fill. By construction
3496 we should never have worker mode only. */
3497 wcast_data_t data;
3499 data.base = worker_bcast_sym;
3500 data.ptr = 0;
3502 if (worker_bcast_size < GET_MODE_SIZE (SImode))
3503 worker_bcast_size = GET_MODE_SIZE (SImode);
3505 data.offset = 0;
3506 emit_insn_before (nvptx_gen_wcast (pvar, PM_read, 0, &data),
3507 before);
3508 /* Barrier so other workers can see the write. */
3509 emit_insn_before (nvptx_wsync (false), tail);
3510 data.offset = 0;
3511 emit_insn_before (nvptx_gen_wcast (pvar, PM_write, 0, &data), tail);
3512 /* This barrier is needed to avoid worker zero clobbering
3513 the broadcast buffer before all the other workers have
3514 had a chance to read this instance of it. */
3515 emit_insn_before (nvptx_wsync (true), tail);
3518 extract_insn (tail);
3519 rtx unsp = gen_rtx_UNSPEC (BImode, gen_rtvec (1, pvar),
3520 UNSPEC_BR_UNIFIED);
3521 validate_change (tail, recog_data.operand_loc[0], unsp, false);
3525 /* PAR is a parallel that is being skipped in its entirety according to
3526 MASK. Treat this as skipping a superblock starting at forked
3527 and ending at joining. */
3529 static void
3530 nvptx_skip_par (unsigned mask, parallel *par)
3532 basic_block tail = par->join_block;
3533 gcc_assert (tail->preds->length () == 1);
3535 basic_block pre_tail = (*tail->preds)[0]->src;
3536 gcc_assert (pre_tail->succs->length () == 1);
3538 nvptx_single (mask, par->forked_block, pre_tail);
3541 /* If PAR has a single inner parallel and PAR itself only contains
3542 empty entry and exit blocks, swallow the inner PAR. */
3544 static void
3545 nvptx_optimize_inner (parallel *par)
3547 parallel *inner = par->inner;
3549 /* We mustn't be the outer dummy par. */
3550 if (!par->mask)
3551 return;
3553 /* We must have a single inner par. */
3554 if (!inner || inner->next)
3555 return;
3557 /* We must only contain 2 blocks ourselves -- the head and tail of
3558 the inner par. */
3559 if (par->blocks.length () != 2)
3560 return;
3562 /* The partitionings must be disjoint. As we only have vector and
3563 worker partitioning, this is sufficient to guarantee that the pars
3564 have adjacent partitioning. */
3565 if ((par->mask & inner->mask) & (GOMP_DIM_MASK (GOMP_DIM_MAX) - 1))
3566 /* This indicates malformed code generation. */
3567 return;
3569 /* The outer forked insn should be immediately followed by the inner
3570 fork insn. */
3571 rtx_insn *forked = par->forked_insn;
3572 rtx_insn *fork = BB_END (par->forked_block);
3574 if (NEXT_INSN (forked) != fork)
3575 return;
3576 gcc_checking_assert (recog_memoized (fork) == CODE_FOR_nvptx_fork);
3578 /* The outer joining insn must immediately follow the inner join
3579 insn. */
3580 rtx_insn *joining = par->joining_insn;
3581 rtx_insn *join = inner->join_insn;
3582 if (NEXT_INSN (join) != joining)
3583 return;
3585 /* Preconditions met. Swallow the inner par. */
3586 if (dump_file)
3587 fprintf (dump_file, "Merging loop %x [%d,%d] into %x [%d,%d]\n",
3588 inner->mask, inner->forked_block->index,
3589 inner->join_block->index,
3590 par->mask, par->forked_block->index, par->join_block->index);
3592 par->mask |= inner->mask & (GOMP_DIM_MASK (GOMP_DIM_MAX) - 1);
3594 par->blocks.reserve (inner->blocks.length ());
3595 while (inner->blocks.length ())
3596 par->blocks.quick_push (inner->blocks.pop ());
3598 par->inner = inner->inner;
3599 inner->inner = NULL;
3601 delete inner;
3604 /* Process the parallel PAR and all its contained
3605 parallels. We do everything but the neutering. Return mask of
3606 partitioned modes used within this parallel. */
3608 static unsigned
3609 nvptx_process_pars (parallel *par)
3611 if (nvptx_optimize)
3612 nvptx_optimize_inner (par);
3614 unsigned inner_mask = par->mask;
3616 /* Do the inner parallels first. */
3617 if (par->inner)
3619 par->inner_mask = nvptx_process_pars (par->inner);
3620 inner_mask |= par->inner_mask;
3623 if (par->mask & GOMP_DIM_MASK (GOMP_DIM_MAX))
3624 /* No propagation needed for a call. */;
3625 else if (par->mask & GOMP_DIM_MASK (GOMP_DIM_WORKER))
3627 nvptx_wpropagate (false, par->forked_block, par->forked_insn);
3628 nvptx_wpropagate (true, par->forked_block, par->fork_insn);
3629 /* Insert begin and end synchronizations. */
3630 emit_insn_after (nvptx_wsync (false), par->forked_insn);
3631 emit_insn_before (nvptx_wsync (true), par->joining_insn);
3633 else if (par->mask & GOMP_DIM_MASK (GOMP_DIM_VECTOR))
3634 nvptx_vpropagate (par->forked_block, par->forked_insn);
3636 /* Now do siblings. */
3637 if (par->next)
3638 inner_mask |= nvptx_process_pars (par->next);
3639 return inner_mask;
3642 /* Neuter the parallel described by PAR. We recurse in depth-first
3643 order. MODES are the partitioning of the execution and OUTER is
3644 the partitioning of the parallels we are contained in. */
3646 static void
3647 nvptx_neuter_pars (parallel *par, unsigned modes, unsigned outer)
3649 unsigned me = (par->mask
3650 & (GOMP_DIM_MASK (GOMP_DIM_WORKER)
3651 | GOMP_DIM_MASK (GOMP_DIM_VECTOR)));
3652 unsigned skip_mask = 0, neuter_mask = 0;
3654 if (par->inner)
3655 nvptx_neuter_pars (par->inner, modes, outer | me);
3657 for (unsigned mode = GOMP_DIM_WORKER; mode <= GOMP_DIM_VECTOR; mode++)
3659 if ((outer | me) & GOMP_DIM_MASK (mode))
3660 {} /* Mode is partitioned: no neutering. */
3661 else if (!(modes & GOMP_DIM_MASK (mode)))
3662 {} /* Mode is not used: nothing to do. */
3663 else if (par->inner_mask & GOMP_DIM_MASK (mode)
3664 || !par->forked_insn)
3665 /* Partitioned in inner parallels, or we're not partitioned
3666 at all: neuter individual blocks. */
3667 neuter_mask |= GOMP_DIM_MASK (mode);
3668 else if (!par->parent || !par->parent->forked_insn
3669 || par->parent->inner_mask & GOMP_DIM_MASK (mode))
3670 /* Parent isn't a parallel, or already contains this partitioning:
3671 skip the parallel at this level ourselves. */
3672 skip_mask |= GOMP_DIM_MASK (mode);
3673 else
3674 {} /* Parent will skip this parallel itself. */
3677 if (neuter_mask)
3679 int ix, len;
3681 if (nvptx_optimize)
3683 /* Neuter whole SESE regions. */
3684 bb_pair_vec_t regions;
3686 nvptx_find_sese (par->blocks, regions);
3687 len = regions.length ();
3688 for (ix = 0; ix != len; ix++)
3690 basic_block from = regions[ix].first;
3691 basic_block to = regions[ix].second;
3693 if (from)
3694 nvptx_single (neuter_mask, from, to);
3695 else
3696 gcc_assert (!to);
3699 else
3701 /* Neuter each BB individually. */
3702 len = par->blocks.length ();
3703 for (ix = 0; ix != len; ix++)
3705 basic_block block = par->blocks[ix];
3707 nvptx_single (neuter_mask, block, block);
3712 if (skip_mask)
3713 nvptx_skip_par (skip_mask, par);
3715 if (par->next)
3716 nvptx_neuter_pars (par->next, modes, outer);
3719 /* PTX-specific reorganization
3720 - Split blocks at fork and join instructions
3721 - Compute live registers
3722 - Mark now-unused registers, so function begin doesn't declare
3723 unused registers.
3724 - Insert state propagation when entering partitioned mode
3725 - Insert neutering instructions when in single mode
3726 - Replace subregs with suitable sequences.
3729 static void
3730 nvptx_reorg (void)
3732 /* We are freeing block_for_insn in the toplev to keep compatibility
3733 with old MDEP_REORGS that are not CFG based. Recompute it now. */
3734 compute_bb_for_insn ();
3736 thread_prologue_and_epilogue_insns ();
3738 /* Split blocks and record interesting unspecs. */
3739 bb_insn_map_t bb_insn_map;
3741 nvptx_split_blocks (&bb_insn_map);
3743 /* Compute live regs. */
3744 df_clear_flags (DF_LR_RUN_DCE);
3745 df_set_flags (DF_NO_INSN_RESCAN | DF_NO_HARD_REGS);
3746 df_live_add_problem ();
3747 df_live_set_all_dirty ();
3748 df_analyze ();
3749 regstat_init_n_sets_and_refs ();
3751 if (dump_file)
3752 df_dump (dump_file);
3754 /* Mark unused regs as unused. */
3755 int max_regs = max_reg_num ();
3756 for (int i = LAST_VIRTUAL_REGISTER + 1; i < max_regs; i++)
3757 if (REG_N_SETS (i) == 0 && REG_N_REFS (i) == 0)
3758 regno_reg_rtx[i] = const0_rtx;
3760 /* Determine launch dimensions of the function. If it is not an
3761 offloaded function (i.e. this is a regular compilation), the
3762 function has no neutering. */
3763 tree attr = get_oacc_fn_attrib (current_function_decl);
3764 if (attr)
3766 /* If we determined this mask before RTL expansion, we could
3767 elide emission of some levels of forks and joins. */
3768 unsigned mask = 0;
3769 tree dims = TREE_VALUE (attr);
3770 unsigned ix;
3772 for (ix = 0; ix != GOMP_DIM_MAX; ix++, dims = TREE_CHAIN (dims))
3774 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
3775 tree allowed = TREE_PURPOSE (dims);
3777 if (size != 1 && !(allowed && integer_zerop (allowed)))
3778 mask |= GOMP_DIM_MASK (ix);
3780 /* If there is worker neutering, there must be vector
3781 neutering. Otherwise the hardware will fail. */
3782 gcc_assert (!(mask & GOMP_DIM_MASK (GOMP_DIM_WORKER))
3783 || (mask & GOMP_DIM_MASK (GOMP_DIM_VECTOR)));
3785 /* Discover & process partitioned regions. */
3786 parallel *pars = nvptx_discover_pars (&bb_insn_map);
3787 nvptx_process_pars (pars);
3788 nvptx_neuter_pars (pars, mask, 0);
3789 delete pars;
3792 /* Replace subregs. */
3793 nvptx_reorg_subreg ();
3795 regstat_free_n_sets_and_refs ();
3797 df_finish_pass (true);
3800 /* Handle a "kernel" attribute; arguments as in
3801 struct attribute_spec.handler. */
3803 static tree
3804 nvptx_handle_kernel_attribute (tree *node, tree name, tree ARG_UNUSED (args),
3805 int ARG_UNUSED (flags), bool *no_add_attrs)
3807 tree decl = *node;
3809 if (TREE_CODE (decl) != FUNCTION_DECL)
3811 error ("%qE attribute only applies to functions", name);
3812 *no_add_attrs = true;
3815 else if (TREE_TYPE (TREE_TYPE (decl)) != void_type_node)
3817 error ("%qE attribute requires a void return type", name);
3818 *no_add_attrs = true;
3821 return NULL_TREE;
3824 /* Table of valid machine attributes. */
3825 static const struct attribute_spec nvptx_attribute_table[] =
3827 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
3828 affects_type_identity } */
3829 { "kernel", 0, 0, true, false, false, nvptx_handle_kernel_attribute, false },
3830 { NULL, 0, 0, false, false, false, NULL, false }
3833 /* Limit vector alignments to BIGGEST_ALIGNMENT. */
3835 static HOST_WIDE_INT
3836 nvptx_vector_alignment (const_tree type)
3838 HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
3840 return MIN (align, BIGGEST_ALIGNMENT);
3843 /* Indicate that INSN cannot be duplicated. */
3845 static bool
3846 nvptx_cannot_copy_insn_p (rtx_insn *insn)
3848 switch (recog_memoized (insn))
3850 case CODE_FOR_nvptx_shufflesi:
3851 case CODE_FOR_nvptx_shufflesf:
3852 case CODE_FOR_nvptx_barsync:
3853 case CODE_FOR_nvptx_fork:
3854 case CODE_FOR_nvptx_forked:
3855 case CODE_FOR_nvptx_joining:
3856 case CODE_FOR_nvptx_join:
3857 return true;
3858 default:
3859 return false;
3863 /* Section anchors do not work. Initialization for flag_section_anchor
3864 probes the existence of the anchoring target hooks and prevents
3865 anchoring if they don't exist. However, we may be used with
3866 a host-side compiler that does support anchoring, and hence see
3867 the anchor flag set (as it's not recalculated). So provide an
3868 implementation denying anchoring. */
3870 static bool
3871 nvptx_use_anchors_for_symbol_p (const_rtx ARG_UNUSED (a))
3873 return false;
3876 /* Record a symbol for mkoffload to enter into the mapping table. */
3878 static void
3879 nvptx_record_offload_symbol (tree decl)
3881 switch (TREE_CODE (decl))
3883 case VAR_DECL:
3884 fprintf (asm_out_file, "//:VAR_MAP \"%s\"\n",
3885 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
3886 break;
3888 case FUNCTION_DECL:
3890 tree attr = get_oacc_fn_attrib (decl);
3891 tree dims = TREE_VALUE (attr);
3892 unsigned ix;
3894 fprintf (asm_out_file, "//:FUNC_MAP \"%s\"",
3895 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
3897 for (ix = 0; ix != GOMP_DIM_MAX; ix++, dims = TREE_CHAIN (dims))
3899 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
3901 gcc_assert (!TREE_PURPOSE (dims));
3902 fprintf (asm_out_file, ", %#x", size);
3905 fprintf (asm_out_file, "\n");
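/* E.g. for a hypothetical offloaded function "foo" with launch
   dimensions {1, 32, 32} the emitted line would be:
   //:FUNC_MAP "foo", 0x1, 0x20, 0x20 */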
3907 break;
3909 default:
3910 gcc_unreachable ();
3914 /* Implement TARGET_ASM_FILE_START. Write the kinds of things ptxas expects
3915 at the start of a file. */
3917 static void
3918 nvptx_file_start (void)
3920 fputs ("// BEGIN PREAMBLE\n", asm_out_file);
3921 fputs ("\t.version\t3.1\n", asm_out_file);
3922 fputs ("\t.target\tsm_30\n", asm_out_file);
3923 fprintf (asm_out_file, "\t.address_size %d\n", GET_MODE_BITSIZE (Pmode));
3924 fputs ("// END PREAMBLE\n", asm_out_file);
3927 /* Write out the function declarations we've collected and declare storage
3928 for the broadcast buffer. */
3930 static void
3931 nvptx_file_end (void)
3933 hash_table<tree_hasher>::iterator iter;
3934 tree decl;
3935 FOR_EACH_HASH_TABLE_ELEMENT (*needed_fndecls_htab, decl, tree, iter)
3936 nvptx_record_fndecl (decl);
3937 fputs (func_decls.str().c_str(), asm_out_file);
3939 if (worker_bcast_size)
3941 /* Define the broadcast buffer. */
3943 worker_bcast_size = (worker_bcast_size + worker_bcast_align - 1)
3944 & ~(worker_bcast_align - 1);
3946 write_var_marker (asm_out_file, true, false, worker_bcast_name);
3947 fprintf (asm_out_file, ".shared .align %d .u8 %s[%d];\n",
3948 worker_bcast_align,
3949 worker_bcast_name, worker_bcast_size);
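/* For instance (illustrative; assuming the buffer symbol is named
   __worker_bcast and 8 bytes with 8-byte alignment are needed):
   .shared .align 8 .u8 __worker_bcast[8]; */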
3952 if (worker_red_size)
3954 /* Define the reduction buffer. */
3956 worker_red_size = ((worker_red_size + worker_red_align - 1)
3957 & ~(worker_red_align - 1));
3959 write_var_marker (asm_out_file, true, false, worker_red_name);
3960 fprintf (asm_out_file, ".shared .align %d .u8 %s[%d];\n",
3961 worker_red_align,
3962 worker_red_name, worker_red_size);
3966 /* Expander for the shuffle builtins. */
3968 static rtx
3969 nvptx_expand_shuffle (tree exp, rtx target, machine_mode mode, int ignore)
3971 if (ignore)
3972 return target;
3974 rtx src = expand_expr (CALL_EXPR_ARG (exp, 0),
3975 NULL_RTX, mode, EXPAND_NORMAL);
3976 if (!REG_P (src))
3977 src = copy_to_mode_reg (mode, src);
3979 rtx idx = expand_expr (CALL_EXPR_ARG (exp, 1),
3980 NULL_RTX, SImode, EXPAND_NORMAL);
3981 rtx op = expand_expr (CALL_EXPR_ARG (exp, 2),
3982 NULL_RTX, SImode, EXPAND_NORMAL);
3984 if (!REG_P (idx) && GET_CODE (idx) != CONST_INT)
3985 idx = copy_to_mode_reg (SImode, idx);
3987 rtx pat = nvptx_gen_shuffle (target, src, idx,
3988 (nvptx_shuffle_kind) INTVAL (op));
3989 if (pat)
3990 emit_insn (pat);
3992 return target;
3995 /* Worker reduction address expander. */
3997 static rtx
3998 nvptx_expand_worker_addr (tree exp, rtx target,
3999 machine_mode ARG_UNUSED (mode), int ignore)
4001 if (ignore)
4002 return target;
4004 unsigned align = TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 2));
4005 if (align > worker_red_align)
4006 worker_red_align = align;
4008 unsigned offset = TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 0));
4009 unsigned size = TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1));
4010 if (size + offset > worker_red_size)
4011 worker_red_size = size + offset;
4013 rtx addr = worker_red_sym;
4014 if (offset)
4016 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (offset));
4017 addr = gen_rtx_CONST (Pmode, addr);
4020 emit_move_insn (target, addr);
4022 return target;
4025 /* Expand the CMP_SWAP PTX builtins. We have our own versions that do
4026 not require taking the address of any object, other than the memory
4027 cell being operated on. */
4029 static rtx
4030 nvptx_expand_cmp_swap (tree exp, rtx target,
4031 machine_mode ARG_UNUSED (m), int ARG_UNUSED (ignore))
4033 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
4035 if (!target)
4036 target = gen_reg_rtx (mode);
4038 rtx mem = expand_expr (CALL_EXPR_ARG (exp, 0),
4039 NULL_RTX, Pmode, EXPAND_NORMAL);
4040 rtx cmp = expand_expr (CALL_EXPR_ARG (exp, 1),
4041 NULL_RTX, mode, EXPAND_NORMAL);
4042 rtx src = expand_expr (CALL_EXPR_ARG (exp, 2),
4043 NULL_RTX, mode, EXPAND_NORMAL);
4044 rtx pat;
4046 mem = gen_rtx_MEM (mode, mem);
4047 if (!REG_P (cmp))
4048 cmp = copy_to_mode_reg (mode, cmp);
4049 if (!REG_P (src))
4050 src = copy_to_mode_reg (mode, src);
4052 if (mode == SImode)
4053 pat = gen_atomic_compare_and_swapsi_1 (target, mem, cmp, src, const0_rtx);
4054 else
4055 pat = gen_atomic_compare_and_swapdi_1 (target, mem, cmp, src, const0_rtx);
4057 emit_insn (pat);
4059 return target;
4063 /* Codes for all the NVPTX builtins. */
4064 enum nvptx_builtins
4066 NVPTX_BUILTIN_SHUFFLE,
4067 NVPTX_BUILTIN_SHUFFLELL,
4068 NVPTX_BUILTIN_WORKER_ADDR,
4069 NVPTX_BUILTIN_CMP_SWAP,
4070 NVPTX_BUILTIN_CMP_SWAPLL,
4071 NVPTX_BUILTIN_MAX
4074 static GTY(()) tree nvptx_builtin_decls[NVPTX_BUILTIN_MAX];
4076 /* Return the NVPTX builtin for CODE. */
4078 static tree
4079 nvptx_builtin_decl (unsigned code, bool ARG_UNUSED (initialize_p))
4081 if (code >= NVPTX_BUILTIN_MAX)
4082 return error_mark_node;
4084 return nvptx_builtin_decls[code];
4087 /* Set up all builtin functions for this target. */
4089 static void
4090 nvptx_init_builtins (void)
4092 #define DEF(ID, NAME, T) \
4093 (nvptx_builtin_decls[NVPTX_BUILTIN_ ## ID] \
4094 = add_builtin_function ("__builtin_nvptx_" NAME, \
4095 build_function_type_list T, \
4096 NVPTX_BUILTIN_ ## ID, BUILT_IN_MD, NULL, NULL))
4097 #define ST sizetype
4098 #define UINT unsigned_type_node
4099 #define LLUINT long_long_unsigned_type_node
4100 #define PTRVOID ptr_type_node
4102 DEF (SHUFFLE, "shuffle", (UINT, UINT, UINT, UINT, NULL_TREE));
4103 DEF (SHUFFLELL, "shufflell", (LLUINT, LLUINT, UINT, UINT, NULL_TREE));
4104 DEF (WORKER_ADDR, "worker_addr",
4105 (PTRVOID, ST, UINT, UINT, NULL_TREE));
4106 DEF (CMP_SWAP, "cmp_swap", (UINT, PTRVOID, UINT, UINT, NULL_TREE));
4107 DEF (CMP_SWAPLL, "cmp_swapll", (LLUINT, PTRVOID, LLUINT, LLUINT, NULL_TREE));
4109 #undef DEF
4110 #undef ST
4111 #undef UINT
4112 #undef LLUINT
4113 #undef PTRVOID
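/* Illustrative uses from offloaded code (KIND must be a constant
   selecting an nvptx_shuffle_kind):
     unsigned y = __builtin_nvptx_shuffle (x, 1u, kind);
     unsigned long long z = __builtin_nvptx_shufflell (w, 1u, kind); */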
4116 /* Expand an expression EXP that calls a built-in function,
4117 with result going to TARGET if that's convenient
4118 (and in mode MODE if that's convenient).
4119 SUBTARGET may be used as the target for computing one of EXP's operands.
4120 IGNORE is nonzero if the value is to be ignored. */
4122 static rtx
4123 nvptx_expand_builtin (tree exp, rtx target, rtx ARG_UNUSED (subtarget),
4124 machine_mode mode, int ignore)
4126 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
4127 switch (DECL_FUNCTION_CODE (fndecl))
4129 case NVPTX_BUILTIN_SHUFFLE:
4130 case NVPTX_BUILTIN_SHUFFLELL:
4131 return nvptx_expand_shuffle (exp, target, mode, ignore);
4133 case NVPTX_BUILTIN_WORKER_ADDR:
4134 return nvptx_expand_worker_addr (exp, target, mode, ignore);
4136 case NVPTX_BUILTIN_CMP_SWAP:
4137 case NVPTX_BUILTIN_CMP_SWAPLL:
4138 return nvptx_expand_cmp_swap (exp, target, mode, ignore);
4140 default: gcc_unreachable ();
4144 /* Define dimension sizes for known hardware. */
4145 #define PTX_VECTOR_LENGTH 32
4146 #define PTX_WORKER_LENGTH 32
4148 /* Validate the compute dimensions of an OpenACC offload region or
4149 routine, filling in non-unity defaults. FN_LEVEL indicates the level
4150 at which a routine might spawn a loop; it is negative for non-routines. */
4152 static bool
4153 nvptx_goacc_validate_dims (tree decl, int dims[], int fn_level)
4155 bool changed = false;
4157 /* The vector size must be 32, unless this is a SEQ routine. */
4158 if (fn_level <= GOMP_DIM_VECTOR
4159 && dims[GOMP_DIM_VECTOR] != PTX_VECTOR_LENGTH)
4161 if (dims[GOMP_DIM_VECTOR] >= 0 && fn_level < 0)
4162 warning_at (DECL_SOURCE_LOCATION (decl), 0,
4163 dims[GOMP_DIM_VECTOR]
4164 ? "using vector_length (%d), ignoring %d"
4165 : "using vector_length (%d), ignoring runtime setting",
4166 PTX_VECTOR_LENGTH, dims[GOMP_DIM_VECTOR]);
4167 dims[GOMP_DIM_VECTOR] = PTX_VECTOR_LENGTH;
4168 changed = true;
4171 /* Check that the number of workers is not too large. */
4172 if (dims[GOMP_DIM_WORKER] > PTX_WORKER_LENGTH)
4174 warning_at (DECL_SOURCE_LOCATION (decl), 0,
4175 "using num_workers (%d), ignoring %d",
4176 PTX_WORKER_LENGTH, dims[GOMP_DIM_WORKER]);
4177 dims[GOMP_DIM_WORKER] = PTX_WORKER_LENGTH;
4178 changed = true;
4181 return changed;
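/* For example, an OpenACC construct such as

     #pragma acc parallel num_workers (64) vector_length (64)

   is adjusted by the checks above to num_workers (32) and
   vector_length (32) on this target, emitting one of the warnings
   above for each adjusted dimension.  */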
4184 /* Return maximum dimension size, or zero for unbounded. */
4186 static int
4187 nvptx_dim_limit (int axis)
4189 switch (axis)
4191 case GOMP_DIM_WORKER:
4192 return PTX_WORKER_LENGTH;
4194 case GOMP_DIM_VECTOR:
4195 return PTX_VECTOR_LENGTH;
4197 default:
4198 break;
4200 return 0;
4203 /* Determine whether fork & join markers are needed. */
4205 static bool
4206 nvptx_goacc_fork_join (gcall *call, const int dims[],
4207 bool ARG_UNUSED (is_fork))
4209 tree arg = gimple_call_arg (call, 2);
4210 unsigned axis = TREE_INT_CST_LOW (arg);
4212 /* We only care about worker and vector partitioning. */
4213 if (axis < GOMP_DIM_WORKER)
4214 return false;
4216 /* If the size is 1, there's no partitioning. */
4217 if (dims[axis] == 1)
4218 return false;
4220 return true;
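/* For example, once dimensions have been validated to
   {gang, worker, vector} = {G, 1, 32}, only vector-level forks and
   joins remain: the gang axis is below GOMP_DIM_WORKER, and the unity
   worker dimension needs no partitioning.  */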
4223 /* Generate a PTX builtin function call that returns the address in
4224 the worker reduction buffer at OFFSET. TYPE is the type of the
4225 data at that location. */
4227 static tree
4228 nvptx_get_worker_red_addr (tree type, tree offset)
4230 machine_mode mode = TYPE_MODE (type);
4231 tree fndecl = nvptx_builtin_decl (NVPTX_BUILTIN_WORKER_ADDR, true);
4232 tree size = build_int_cst (unsigned_type_node, GET_MODE_SIZE (mode));
4233 tree align = build_int_cst (unsigned_type_node,
4234 GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT);
4235 tree call = build_call_expr (fndecl, 3, offset, size, align);
4237 return fold_convert (build_pointer_type (type), call);
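/* The call built above corresponds to source of roughly this shape,
   where T stands for TYPE:

     (T *) __builtin_nvptx_worker_addr (offset, sizeof (T),
                                        __alignof__ (T));  */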
4240 /* Emit a SHFL.DOWN of VAR by SHIFT lanes into DEST_VAR. This function
4241 will cast the variable if necessary. */
4243 static void
4244 nvptx_generate_vector_shuffle (location_t loc,
4245 tree dest_var, tree var, unsigned shift,
4246 gimple_seq *seq)
4248 unsigned fn = NVPTX_BUILTIN_SHUFFLE;
4249 tree_code code = NOP_EXPR;
4250 tree arg_type = unsigned_type_node;
4251 tree var_type = TREE_TYPE (var);
4252 tree dest_type = var_type;
4254 if (TREE_CODE (var_type) == COMPLEX_TYPE)
4255 var_type = TREE_TYPE (var_type);
4257 if (TREE_CODE (var_type) == REAL_TYPE)
4258 code = VIEW_CONVERT_EXPR;
4260 if (TYPE_SIZE (var_type)
4261 == TYPE_SIZE (long_long_unsigned_type_node))
4263 fn = NVPTX_BUILTIN_SHUFFLELL;
4264 arg_type = long_long_unsigned_type_node;
4267 tree call = nvptx_builtin_decl (fn, true);
4268 tree bits = build_int_cst (unsigned_type_node, shift);
4269 tree kind = build_int_cst (unsigned_type_node, SHUFFLE_DOWN);
4270 tree expr;
4272 if (var_type != dest_type)
4274 /* Do real and imaginary parts separately. */
4275 tree real = fold_build1 (REALPART_EXPR, var_type, var);
4276 real = fold_build1 (code, arg_type, real);
4277 real = build_call_expr_loc (loc, call, 3, real, bits, kind);
4278 real = fold_build1 (code, var_type, real);
4280 tree imag = fold_build1 (IMAGPART_EXPR, var_type, var);
4281 imag = fold_build1 (code, arg_type, imag);
4282 imag = build_call_expr_loc (loc, call, 3, imag, bits, kind);
4283 imag = fold_build1 (code, var_type, imag);
4285 expr = fold_build2 (COMPLEX_EXPR, dest_type, real, imag);
4287 else
4289 expr = fold_build1 (code, arg_type, var);
4290 expr = build_call_expr_loc (loc, call, 3, expr, bits, kind);
4291 expr = fold_build1 (code, dest_type, expr);
4294 gimplify_assign (dest_var, expr, seq);
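/* A minimal sketch of the butterfly reduction this helper is used to
   build (see nvptx_goacc_reduction_fini below); SUM is a hypothetical
   per-lane unsigned value being combined across the 32 vector lanes:

     for (unsigned shift = 16; shift > 0; shift >>= 1)
       sum += __builtin_nvptx_shuffle (sum, shift, SHUFFLE_DOWN);

   After the loop, lane 0 holds the reduction over all 32 lanes.  */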
4297 /* Lazily generate the global lock var decl and return its address. */
4299 static tree
4300 nvptx_global_lock_addr ()
4302 tree v = global_lock_var;
4304 if (!v)
4306 tree name = get_identifier ("__reduction_lock");
4307 tree type = build_qualified_type (unsigned_type_node,
4308 TYPE_QUAL_VOLATILE);
4309 v = build_decl (BUILTINS_LOCATION, VAR_DECL, name, type);
4310 global_lock_var = v;
4311 DECL_ARTIFICIAL (v) = 1;
4312 DECL_EXTERNAL (v) = 1;
4313 TREE_STATIC (v) = 1;
4314 TREE_PUBLIC (v) = 1;
4315 TREE_USED (v) = 1;
4316 mark_addressable (v);
4317 mark_decl_referenced (v);
4320 return build_fold_addr_expr (v);
4323 /* Insert code to locklessly update *PTR with *PTR OP VAR just before
4324 GSI. We use a lockless scheme for nearly all cases, which looks
4325 like:
4326 actual = initval(OP);
4327 do {
4328 guess = actual;
4329 write = guess OP myval;
4330 actual = cmp&swap (ptr, guess, write)
4331 } while (actual bit-different-to guess);
4332 return write;
4334 This relies on a cmp&swap instruction, which is available for 32-
4335 and 64-bit types. Larger types must use a locking scheme. */
4337 static tree
4338 nvptx_lockless_update (location_t loc, gimple_stmt_iterator *gsi,
4339 tree ptr, tree var, tree_code op)
4341 unsigned fn = NVPTX_BUILTIN_CMP_SWAP;
4342 tree_code code = NOP_EXPR;
4343 tree arg_type = unsigned_type_node;
4344 tree var_type = TREE_TYPE (var);
4346 if (TREE_CODE (var_type) == COMPLEX_TYPE
4347 || TREE_CODE (var_type) == REAL_TYPE)
4348 code = VIEW_CONVERT_EXPR;
4350 if (TYPE_SIZE (var_type) == TYPE_SIZE (long_long_unsigned_type_node))
4352 arg_type = long_long_unsigned_type_node;
4353 fn = NVPTX_BUILTIN_CMP_SWAPLL;
4356 tree swap_fn = nvptx_builtin_decl (fn, true);
4358 gimple_seq init_seq = NULL;
4359 tree init_var = make_ssa_name (arg_type);
4360 tree init_expr = omp_reduction_init_op (loc, op, var_type);
4361 init_expr = fold_build1 (code, arg_type, init_expr);
4362 gimplify_assign (init_var, init_expr, &init_seq);
4363 gimple *init_end = gimple_seq_last (init_seq);
4365 gsi_insert_seq_before (gsi, init_seq, GSI_SAME_STMT);
4367 /* Split the block just after the init stmts. */
4368 basic_block pre_bb = gsi_bb (*gsi);
4369 edge pre_edge = split_block (pre_bb, init_end);
4370 basic_block loop_bb = pre_edge->dest;
4371 pre_bb = pre_edge->src;
4372 /* Reset the iterator. */
4373 *gsi = gsi_for_stmt (gsi_stmt (*gsi));
4375 tree expect_var = make_ssa_name (arg_type);
4376 tree actual_var = make_ssa_name (arg_type);
4377 tree write_var = make_ssa_name (arg_type);
4379 /* Build and insert the reduction calculation. */
4380 gimple_seq red_seq = NULL;
4381 tree write_expr = fold_build1 (code, var_type, expect_var);
4382 write_expr = fold_build2 (op, var_type, write_expr, var);
4383 write_expr = fold_build1 (code, arg_type, write_expr);
4384 gimplify_assign (write_var, write_expr, &red_seq);
4386 gsi_insert_seq_before (gsi, red_seq, GSI_SAME_STMT);
4388 /* Build & insert the cmp&swap sequence. */
4389 gimple_seq latch_seq = NULL;
4390 tree swap_expr = build_call_expr_loc (loc, swap_fn, 3,
4391 ptr, expect_var, write_var);
4392 gimplify_assign (actual_var, swap_expr, &latch_seq);
4394 gcond *cond = gimple_build_cond (EQ_EXPR, actual_var, expect_var,
4395 NULL_TREE, NULL_TREE);
4396 gimple_seq_add_stmt (&latch_seq, cond);
4398 gimple *latch_end = gimple_seq_last (latch_seq);
4399 gsi_insert_seq_before (gsi, latch_seq, GSI_SAME_STMT);
4401 /* Split the block just after the latch stmts. */
4402 edge post_edge = split_block (loop_bb, latch_end);
4403 basic_block post_bb = post_edge->dest;
4404 loop_bb = post_edge->src;
4405 *gsi = gsi_for_stmt (gsi_stmt (*gsi));
4407 post_edge->flags ^= EDGE_TRUE_VALUE | EDGE_FALLTHRU;
4408 edge loop_edge = make_edge (loop_bb, loop_bb, EDGE_FALSE_VALUE);
4409 set_immediate_dominator (CDI_DOMINATORS, loop_bb, pre_bb);
4410 set_immediate_dominator (CDI_DOMINATORS, post_bb, loop_bb);
4412 gphi *phi = create_phi_node (expect_var, loop_bb);
4413 add_phi_arg (phi, init_var, pre_edge, loc);
4414 add_phi_arg (phi, actual_var, loop_edge, loc);
4416 loop *loop = alloc_loop ();
4417 loop->header = loop_bb;
4418 loop->latch = loop_bb;
4419 add_loop (loop, loop_bb->loop_father);
4421 return fold_build1 (code, var_type, write_var);
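/* Rendered as source and specialized to a 32-bit PTR with OP being
   PLUS_EXPR, the loop built above behaves like this sketch; GUESS,
   WRITE and ACTUAL are hypothetical names:

     unsigned guess, write, actual = 0;  // initval (PLUS_EXPR)
     do
       {
         guess = actual;
         write = guess + var;
         actual = __builtin_nvptx_cmp_swap (ptr, guess, write);
       }
     while (actual != guess);

   after which WRITE holds the updated value to return.  */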
4424 /* Insert code to lockfully update *PTR with *PTR OP VAR just before
4425 GSI. This is necessary for types larger than 64 bits, where there
4426 is no cmp&swap instruction to implement a lockless scheme. We use
4427 a lock variable in global memory.
4429 while (cmp&swap (&lock_var, 0, 1))
4430 continue;
4431 T accum = *ptr;
4432 accum = accum OP var;
4433 *ptr = accum;
4434 cmp&swap (&lock_var, 1, 0);
4435 return accum;
4437 A lock in global memory is necessary to force execution engine
4438 descheduling and avoid resource starvation that can occur if the
4439 lock is in .shared memory. */
4441 static tree
4442 nvptx_lockfull_update (location_t loc, gimple_stmt_iterator *gsi,
4443 tree ptr, tree var, tree_code op)
4445 tree var_type = TREE_TYPE (var);
4446 tree swap_fn = nvptx_builtin_decl (NVPTX_BUILTIN_CMP_SWAP, true);
4447 tree uns_unlocked = build_int_cst (unsigned_type_node, 0);
4448 tree uns_locked = build_int_cst (unsigned_type_node, 1);
4450 /* Split the block just before the gsi. Insert a gimple nop to make
4451 this easier. */
4452 gimple *nop = gimple_build_nop ();
4453 gsi_insert_before (gsi, nop, GSI_SAME_STMT);
4454 basic_block entry_bb = gsi_bb (*gsi);
4455 edge entry_edge = split_block (entry_bb, nop);
4456 basic_block lock_bb = entry_edge->dest;
4457 /* Reset the iterator. */
4458 *gsi = gsi_for_stmt (gsi_stmt (*gsi));
4460 /* Build and insert the locking sequence. */
4461 gimple_seq lock_seq = NULL;
4462 tree lock_var = make_ssa_name (unsigned_type_node);
4463 tree lock_expr = nvptx_global_lock_addr ();
4464 lock_expr = build_call_expr_loc (loc, swap_fn, 3, lock_expr,
4465 uns_unlocked, uns_locked);
4466 gimplify_assign (lock_var, lock_expr, &lock_seq);
4467 gcond *cond = gimple_build_cond (EQ_EXPR, lock_var, uns_unlocked,
4468 NULL_TREE, NULL_TREE);
4469 gimple_seq_add_stmt (&lock_seq, cond);
4470 gimple *lock_end = gimple_seq_last (lock_seq);
4471 gsi_insert_seq_before (gsi, lock_seq, GSI_SAME_STMT);
4473 /* Split the block just after the lock sequence. */
4474 edge locked_edge = split_block (lock_bb, lock_end);
4475 basic_block update_bb = locked_edge->dest;
4476 lock_bb = locked_edge->src;
4477 *gsi = gsi_for_stmt (gsi_stmt (*gsi));
4479 /* Create the lock loop ... */
4480 locked_edge->flags ^= EDGE_TRUE_VALUE | EDGE_FALLTHRU;
4481 make_edge (lock_bb, lock_bb, EDGE_FALSE_VALUE);
4482 set_immediate_dominator (CDI_DOMINATORS, lock_bb, entry_bb);
4483 set_immediate_dominator (CDI_DOMINATORS, update_bb, lock_bb);
4485 /* ... and the loop structure. */
4486 loop *lock_loop = alloc_loop ();
4487 lock_loop->header = lock_bb;
4488 lock_loop->latch = lock_bb;
4489 lock_loop->nb_iterations_estimate = 1;
4490 lock_loop->any_estimate = true;
4491 add_loop (lock_loop, entry_bb->loop_father);
4493 /* Build and insert the reduction calculation. */
4494 gimple_seq red_seq = NULL;
4495 tree acc_in = make_ssa_name (var_type);
4496 tree ref_in = build_simple_mem_ref (ptr);
4497 TREE_THIS_VOLATILE (ref_in) = 1;
4498 gimplify_assign (acc_in, ref_in, &red_seq);
4500 tree acc_out = make_ssa_name (var_type);
4501 tree update_expr = fold_build2 (op, var_type, acc_in, var);
4502 gimplify_assign (acc_out, update_expr, &red_seq);
4504 tree ref_out = build_simple_mem_ref (ptr);
4505 TREE_THIS_VOLATILE (ref_out) = 1;
4506 gimplify_assign (ref_out, acc_out, &red_seq);
4508 gsi_insert_seq_before (gsi, red_seq, GSI_SAME_STMT);
4510 /* Build & insert the unlock sequence. */
4511 gimple_seq unlock_seq = NULL;
4512 tree unlock_expr = nvptx_global_lock_addr ();
4513 unlock_expr = build_call_expr_loc (loc, swap_fn, 3, unlock_expr,
4514 uns_locked, uns_unlocked);
4515 gimplify_and_add (unlock_expr, &unlock_seq);
4516 gsi_insert_seq_before (gsi, unlock_seq, GSI_SAME_STMT);
4518 return acc_out;
4521 /* Emit a sequence to update a reduction accumulator at *PTR with the
4522 value held in VAR using operator OP. Return the updated value.
4524 TODO: optimize for atomic ops and independent complex ops. */
4526 static tree
4527 nvptx_reduction_update (location_t loc, gimple_stmt_iterator *gsi,
4528 tree ptr, tree var, tree_code op)
4530 tree type = TREE_TYPE (var);
4531 tree size = TYPE_SIZE (type);
4533 if (size == TYPE_SIZE (unsigned_type_node)
4534 || size == TYPE_SIZE (long_long_unsigned_type_node))
4535 return nvptx_lockless_update (loc, gsi, ptr, var, op);
4536 else
4537 return nvptx_lockfull_update (loc, gsi, ptr, var, op);
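/* The GOACC_REDUCTION internal-function calls handled by the expanders
   below all share one argument layout, as used here:

     arg 0: reduction phase (IFN_GOACC_REDUCTION_{SETUP,INIT,FINI,TEARDOWN})
     arg 1: reference to the receiver object, or zero
     arg 2: the local reduction variable
     arg 3: the partitioning level (GOMP_DIM_*)
     arg 4: the reduction operator, encoded as a tree code
     arg 5: offset into the worker reduction buffer  */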
4540 /* NVPTX implementation of GOACC_REDUCTION_SETUP. */
4542 static void
4543 nvptx_goacc_reduction_setup (gcall *call)
4545 gimple_stmt_iterator gsi = gsi_for_stmt (call);
4546 tree lhs = gimple_call_lhs (call);
4547 tree var = gimple_call_arg (call, 2);
4548 int level = TREE_INT_CST_LOW (gimple_call_arg (call, 3));
4549 gimple_seq seq = NULL;
4551 push_gimplify_context (true);
4553 if (level != GOMP_DIM_GANG)
4555 /* Copy the receiver object. */
4556 tree ref_to_res = gimple_call_arg (call, 1);
4558 if (!integer_zerop (ref_to_res))
4559 var = build_simple_mem_ref (ref_to_res);
4562 if (level == GOMP_DIM_WORKER)
4564 /* Store incoming value to worker reduction buffer. */
4565 tree offset = gimple_call_arg (call, 5);
4566 tree call = nvptx_get_worker_red_addr (TREE_TYPE (var), offset);
4567 tree ptr = make_ssa_name (TREE_TYPE (call));
4569 gimplify_assign (ptr, call, &seq);
4570 tree ref = build_simple_mem_ref (ptr);
4571 TREE_THIS_VOLATILE (ref) = 1;
4572 gimplify_assign (ref, var, &seq);
4575 if (lhs)
4576 gimplify_assign (lhs, var, &seq);
4578 pop_gimplify_context (NULL);
4579 gsi_replace_with_seq (&gsi, seq, true);
4582 /* NVPTX implementation of GOACC_REDUCTION_INIT. */
4584 static void
4585 nvptx_goacc_reduction_init (gcall *call)
4587 gimple_stmt_iterator gsi = gsi_for_stmt (call);
4588 tree lhs = gimple_call_lhs (call);
4589 tree var = gimple_call_arg (call, 2);
4590 int level = TREE_INT_CST_LOW (gimple_call_arg (call, 3));
4591 enum tree_code rcode
4592 = (enum tree_code)TREE_INT_CST_LOW (gimple_call_arg (call, 4));
4593 tree init = omp_reduction_init_op (gimple_location (call), rcode,
4594 TREE_TYPE (var));
4595 gimple_seq seq = NULL;
4597 push_gimplify_context (true);
4599 if (level == GOMP_DIM_VECTOR)
4601 /* Initialize the non-zero lanes of the vector to INIT_VAL (OP). */
4602 tree tid = make_ssa_name (integer_type_node);
4603 tree dim_vector = gimple_call_arg (call, 3);
4604 gimple *tid_call = gimple_build_call_internal (IFN_GOACC_DIM_POS, 1,
4605 dim_vector);
4606 gimple *cond_stmt = gimple_build_cond (NE_EXPR, tid, integer_zero_node,
4607 NULL_TREE, NULL_TREE);
4609 gimple_call_set_lhs (tid_call, tid);
4610 gimple_seq_add_stmt (&seq, tid_call);
4611 gimple_seq_add_stmt (&seq, cond_stmt);
4613 /* Split the block just after the call. */
4614 edge init_edge = split_block (gsi_bb (gsi), call);
4615 basic_block init_bb = init_edge->dest;
4616 basic_block call_bb = init_edge->src;
4618 /* Fixup flags from call_bb to init_bb. */
4619 init_edge->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
4621 /* Set the initialization stmts. */
4622 gimple_seq init_seq = NULL;
4623 tree init_var = make_ssa_name (TREE_TYPE (var));
4624 gimplify_assign (init_var, init, &init_seq);
4625 gsi = gsi_start_bb (init_bb);
4626 gsi_insert_seq_before (&gsi, init_seq, GSI_SAME_STMT);
4628 /* Split block just after the init stmt. */
4629 gsi_prev (&gsi);
4630 edge inited_edge = split_block (gsi_bb (gsi), gsi_stmt (gsi));
4631 basic_block dst_bb = inited_edge->dest;
4633 /* Create false edge from call_bb to dst_bb. */
4634 edge nop_edge = make_edge (call_bb, dst_bb, EDGE_FALSE_VALUE);
4636 /* Create phi node in dst block. */
4637 gphi *phi = create_phi_node (lhs, dst_bb);
4638 add_phi_arg (phi, init_var, inited_edge, gimple_location (call));
4639 add_phi_arg (phi, var, nop_edge, gimple_location (call));
4641 /* Reset dominator of dst bb. */
4642 set_immediate_dominator (CDI_DOMINATORS, dst_bb, call_bb);
4644 /* Reset the gsi. */
4645 gsi = gsi_for_stmt (call);
4647 else
4649 if (level == GOMP_DIM_GANG)
4651 /* If there's no receiver object, propagate the incoming VAR. */
4652 tree ref_to_res = gimple_call_arg (call, 1);
4653 if (integer_zerop (ref_to_res))
4654 init = var;
4657 gimplify_assign (lhs, init, &seq);
4660 pop_gimplify_context (NULL);
4661 gsi_replace_with_seq (&gsi, seq, true);
4664 /* NVPTX implementation of GOACC_REDUCTION_FINI. */
4666 static void
4667 nvptx_goacc_reduction_fini (gcall *call)
4669 gimple_stmt_iterator gsi = gsi_for_stmt (call);
4670 tree lhs = gimple_call_lhs (call);
4671 tree ref_to_res = gimple_call_arg (call, 1);
4672 tree var = gimple_call_arg (call, 2);
4673 int level = TREE_INT_CST_LOW (gimple_call_arg (call, 3));
4674 enum tree_code op
4675 = (enum tree_code)TREE_INT_CST_LOW (gimple_call_arg (call, 4));
4676 gimple_seq seq = NULL;
4677 tree r = NULL_TREE;
4679 push_gimplify_context (true);
4681 if (level == GOMP_DIM_VECTOR)
4683 /* Emit binary shuffle tree. TODO: emit this as an actual loop,
4684 but that requires a method of emitting a unified jump at the
4685 gimple level. */
4686 for (int shfl = PTX_VECTOR_LENGTH / 2; shfl > 0; shfl = shfl >> 1)
4688 tree other_var = make_ssa_name (TREE_TYPE (var));
4689 nvptx_generate_vector_shuffle (gimple_location (call),
4690 other_var, var, shfl, &seq);
4692 r = make_ssa_name (TREE_TYPE (var));
4693 gimplify_assign (r, fold_build2 (op, TREE_TYPE (var),
4694 var, other_var), &seq);
4695 var = r;
4698 else
4700 tree accum = NULL_TREE;
4702 if (level == GOMP_DIM_WORKER)
4704 /* Get reduction buffer address. */
4705 tree offset = gimple_call_arg (call, 5);
4706 tree call = nvptx_get_worker_red_addr (TREE_TYPE (var), offset);
4707 tree ptr = make_ssa_name (TREE_TYPE (call));
4709 gimplify_assign (ptr, call, &seq);
4710 accum = ptr;
4712 else if (integer_zerop (ref_to_res))
4713 r = var;
4714 else
4715 accum = ref_to_res;
4717 if (accum)
4719 /* Update the accumulator. */
4720 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
4721 seq = NULL;
4722 r = nvptx_reduction_update (gimple_location (call), &gsi,
4723 accum, var, op);
4727 if (lhs)
4728 gimplify_assign (lhs, r, &seq);
4729 pop_gimplify_context (NULL);
4731 gsi_replace_with_seq (&gsi, seq, true);
4734 /* NVPTX implementation of GOACC_REDUCTION_TEARDOWN. */
4736 static void
4737 nvptx_goacc_reduction_teardown (gcall *call)
4739 gimple_stmt_iterator gsi = gsi_for_stmt (call);
4740 tree lhs = gimple_call_lhs (call);
4741 tree var = gimple_call_arg (call, 2);
4742 int level = TREE_INT_CST_LOW (gimple_call_arg (call, 3));
4743 gimple_seq seq = NULL;
4745 push_gimplify_context (true);
4746 if (level == GOMP_DIM_WORKER)
4748 /* Read the worker reduction buffer. */
4749 tree offset = gimple_call_arg (call, 5);
4750 tree call = nvptx_get_worker_red_addr (TREE_TYPE (var), offset);
4751 tree ptr = make_ssa_name (TREE_TYPE (call));
4753 gimplify_assign (ptr, call, &seq);
4754 var = build_simple_mem_ref (ptr);
4755 TREE_THIS_VOLATILE (var) = 1;
4758 if (level != GOMP_DIM_GANG)
4760 /* Write to the receiver object. */
4761 tree ref_to_res = gimple_call_arg (call, 1);
4763 if (!integer_zerop (ref_to_res))
4764 gimplify_assign (build_simple_mem_ref (ref_to_res), var, &seq);
4767 if (lhs)
4768 gimplify_assign (lhs, var, &seq);
4770 pop_gimplify_context (NULL);
4772 gsi_replace_with_seq (&gsi, seq, true);
4775 /* NVPTX reduction expander. */
4777 void
4778 nvptx_goacc_reduction (gcall *call)
4780 unsigned code = (unsigned)TREE_INT_CST_LOW (gimple_call_arg (call, 0));
4782 switch (code)
4784 case IFN_GOACC_REDUCTION_SETUP:
4785 nvptx_goacc_reduction_setup (call);
4786 break;
4788 case IFN_GOACC_REDUCTION_INIT:
4789 nvptx_goacc_reduction_init (call);
4790 break;
4792 case IFN_GOACC_REDUCTION_FINI:
4793 nvptx_goacc_reduction_fini (call);
4794 break;
4796 case IFN_GOACC_REDUCTION_TEARDOWN:
4797 nvptx_goacc_reduction_teardown (call);
4798 break;
4800 default:
4801 gcc_unreachable ();
4805 #undef TARGET_OPTION_OVERRIDE
4806 #define TARGET_OPTION_OVERRIDE nvptx_option_override
4808 #undef TARGET_ATTRIBUTE_TABLE
4809 #define TARGET_ATTRIBUTE_TABLE nvptx_attribute_table
4811 #undef TARGET_LEGITIMATE_ADDRESS_P
4812 #define TARGET_LEGITIMATE_ADDRESS_P nvptx_legitimate_address_p
4814 #undef TARGET_PROMOTE_FUNCTION_MODE
4815 #define TARGET_PROMOTE_FUNCTION_MODE nvptx_promote_function_mode
4817 #undef TARGET_FUNCTION_ARG
4818 #define TARGET_FUNCTION_ARG nvptx_function_arg
4819 #undef TARGET_FUNCTION_INCOMING_ARG
4820 #define TARGET_FUNCTION_INCOMING_ARG nvptx_function_incoming_arg
4821 #undef TARGET_FUNCTION_ARG_ADVANCE
4822 #define TARGET_FUNCTION_ARG_ADVANCE nvptx_function_arg_advance
4823 #undef TARGET_FUNCTION_ARG_BOUNDARY
4824 #define TARGET_FUNCTION_ARG_BOUNDARY nvptx_function_arg_boundary
4825 #undef TARGET_FUNCTION_ARG_ROUND_BOUNDARY
4826 #define TARGET_FUNCTION_ARG_ROUND_BOUNDARY nvptx_function_arg_boundary
4827 #undef TARGET_PASS_BY_REFERENCE
4828 #define TARGET_PASS_BY_REFERENCE nvptx_pass_by_reference
4829 #undef TARGET_FUNCTION_VALUE_REGNO_P
4830 #define TARGET_FUNCTION_VALUE_REGNO_P nvptx_function_value_regno_p
4831 #undef TARGET_FUNCTION_VALUE
4832 #define TARGET_FUNCTION_VALUE nvptx_function_value
4833 #undef TARGET_LIBCALL_VALUE
4834 #define TARGET_LIBCALL_VALUE nvptx_libcall_value
4835 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
4836 #define TARGET_FUNCTION_OK_FOR_SIBCALL nvptx_function_ok_for_sibcall
4837 #undef TARGET_GET_DRAP_RTX
4838 #define TARGET_GET_DRAP_RTX nvptx_get_drap_rtx
4839 #undef TARGET_SPLIT_COMPLEX_ARG
4840 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
4841 #undef TARGET_RETURN_IN_MEMORY
4842 #define TARGET_RETURN_IN_MEMORY nvptx_return_in_memory
4843 #undef TARGET_OMIT_STRUCT_RETURN_REG
4844 #define TARGET_OMIT_STRUCT_RETURN_REG true
4845 #undef TARGET_STRICT_ARGUMENT_NAMING
4846 #define TARGET_STRICT_ARGUMENT_NAMING nvptx_strict_argument_naming
4847 #undef TARGET_STATIC_CHAIN
4848 #define TARGET_STATIC_CHAIN nvptx_static_chain
4850 #undef TARGET_CALL_ARGS
4851 #define TARGET_CALL_ARGS nvptx_call_args
4852 #undef TARGET_END_CALL_ARGS
4853 #define TARGET_END_CALL_ARGS nvptx_end_call_args
4855 #undef TARGET_ASM_FILE_START
4856 #define TARGET_ASM_FILE_START nvptx_file_start
4857 #undef TARGET_ASM_FILE_END
4858 #define TARGET_ASM_FILE_END nvptx_file_end
4859 #undef TARGET_ASM_GLOBALIZE_LABEL
4860 #define TARGET_ASM_GLOBALIZE_LABEL nvptx_globalize_label
4861 #undef TARGET_ASM_ASSEMBLE_UNDEFINED_DECL
4862 #define TARGET_ASM_ASSEMBLE_UNDEFINED_DECL nvptx_assemble_undefined_decl
4863 #undef TARGET_PRINT_OPERAND
4864 #define TARGET_PRINT_OPERAND nvptx_print_operand
4865 #undef TARGET_PRINT_OPERAND_ADDRESS
4866 #define TARGET_PRINT_OPERAND_ADDRESS nvptx_print_operand_address
4867 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
4868 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P nvptx_print_operand_punct_valid_p
4869 #undef TARGET_ASM_INTEGER
4870 #define TARGET_ASM_INTEGER nvptx_assemble_integer
4871 #undef TARGET_ASM_DECL_END
4872 #define TARGET_ASM_DECL_END nvptx_assemble_decl_end
4873 #undef TARGET_ASM_DECLARE_CONSTANT_NAME
4874 #define TARGET_ASM_DECLARE_CONSTANT_NAME nvptx_asm_declare_constant_name
4875 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
4876 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
4877 #undef TARGET_ASM_NEED_VAR_DECL_BEFORE_USE
4878 #define TARGET_ASM_NEED_VAR_DECL_BEFORE_USE true
4880 #undef TARGET_MACHINE_DEPENDENT_REORG
4881 #define TARGET_MACHINE_DEPENDENT_REORG nvptx_reorg
4882 #undef TARGET_NO_REGISTER_ALLOCATION
4883 #define TARGET_NO_REGISTER_ALLOCATION true
4885 #undef TARGET_ENCODE_SECTION_INFO
4886 #define TARGET_ENCODE_SECTION_INFO nvptx_encode_section_info
4887 #undef TARGET_RECORD_OFFLOAD_SYMBOL
4888 #define TARGET_RECORD_OFFLOAD_SYMBOL nvptx_record_offload_symbol
4890 #undef TARGET_VECTOR_ALIGNMENT
4891 #define TARGET_VECTOR_ALIGNMENT nvptx_vector_alignment
4893 #undef TARGET_CANNOT_COPY_INSN_P
4894 #define TARGET_CANNOT_COPY_INSN_P nvptx_cannot_copy_insn_p
4896 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
4897 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P nvptx_use_anchors_for_symbol_p
4899 #undef TARGET_INIT_BUILTINS
4900 #define TARGET_INIT_BUILTINS nvptx_init_builtins
4901 #undef TARGET_EXPAND_BUILTIN
4902 #define TARGET_EXPAND_BUILTIN nvptx_expand_builtin
4903 #undef TARGET_BUILTIN_DECL
4904 #define TARGET_BUILTIN_DECL nvptx_builtin_decl
4906 #undef TARGET_GOACC_VALIDATE_DIMS
4907 #define TARGET_GOACC_VALIDATE_DIMS nvptx_goacc_validate_dims
4909 #undef TARGET_GOACC_DIM_LIMIT
4910 #define TARGET_GOACC_DIM_LIMIT nvptx_dim_limit
4912 #undef TARGET_GOACC_FORK_JOIN
4913 #define TARGET_GOACC_FORK_JOIN nvptx_goacc_fork_join
4915 #undef TARGET_GOACC_REDUCTION
4916 #define TARGET_GOACC_REDUCTION nvptx_goacc_reduction
4918 struct gcc_target targetm = TARGET_INITIALIZER;
4920 #include "gt-nvptx.h"