gcc/fold-mem-offsets.cc
/* Late RTL pass to fold memory offsets.
   Copyright (C) 2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "expr.h"
#include "backend.h"
#include "regs.h"
#include "target.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "predict.h"
#include "df.h"
#include "tree-pass.h"
#include "cfgrtl.h"
/* This pass tries to optimize memory offset calculations by moving constants
   from add instructions to the memory instructions (loads / stores).
   For example it can transform code like this:

     add  t4, sp, 16
     add  t2, a6, t4
     shl  t3, t2, 1
     ld   a2, 0(t3)
     add  a2, 1
     sd   a2, 8(t2)

   into the following (one instruction less):

     add  t2, a6, sp
     shl  t3, t2, 1
     ld   a2, 32(t3)
     add  a2, 1
     sd   a2, 24(t2)

   Although the previous passes try to emit efficient offset calculations,
   this pass is still beneficial because:

    - The mechanisms that optimize memory offsets usually work with specific
      patterns or have limitations.  This pass is designed to fold offsets
      through complex calculations that affect multiple memory operations
      and have partially overlapping calculations.

    - There are cases where add instructions are introduced in late rtl passes
      and the rest of the pipeline cannot eliminate them.  Arrays and structs
      allocated on the stack can result in unwanted add instructions that
      cannot be eliminated easily.

   This pass works on a basic block level and consists of 4 phases:

    - Phase 1 (Analysis): Find "foldable" instructions.
      Foldable instructions are those that we know how to propagate
      a constant addition through (add, shift, move, ...) and only have other
      foldable instructions as uses.  In this phase a DFS traversal on the
      definition tree is performed and foldable instructions are marked on
      a bitmap.  The add immediate instructions that are reachable in this
      DFS are candidates for folding since all the intermediate calculations
      affected by them are also foldable.

    - Phase 2 (Validity): Traverse and calculate the offsets that would result
      from folding the add immediate instructions.  Check whether the
      calculated offsets result in a valid instruction for the target.

    - Phase 3 (Commit offsets): Traverse again.  It is now known which folds
      are valid so at this point change the offsets in the memory
      instructions.

    - Phase 4 (Commit instruction deletions): Scan all instructions and delete
      or simplify (reduce to move) all add immediate instructions that were
      folded.

   This pass should run before hard register propagation because it creates
   register moves that we expect to be eliminated.  */
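/* Illustrative sketch (not part of the original sources): at the RTL level
   the foldable instructions above look roughly like

     (set (reg:DI t4) (plus:DI (reg:DI sp) (const_int 16)))
     (set (reg:DI t2) (plus:DI (reg:DI a6) (reg:DI t4)))
     (set (reg:DI a2) (mem:DI (reg:DI t3)))

   and a successful fold rewrites the MEM address into
   (plus:DI (reg:DI t3) (const_int 32)) while the add immediate is reduced
   to a plain register move.  */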
namespace {

const pass_data pass_data_fold_mem =
{
  RTL_PASS, /* type */
  "fold_mem_offsets", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_fold_mem_offsets : public rtl_opt_pass
{
public:
  pass_fold_mem_offsets (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_fold_mem, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_fold_mem_offsets && optimize >= 2;
    }

  virtual unsigned int execute (function *);
}; // class pass_fold_mem_offsets
/* Class that holds in FOLD_INSNS the instructions that, if folded, would
   increase the offset of a memory instruction by ADDED_OFFSET.  */
class fold_mem_info {
public:
  auto_bitmap fold_insns;
  HOST_WIDE_INT added_offset;
};

typedef hash_map<rtx_insn *, fold_mem_info *> fold_info_map;
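/* Hypothetical usage sketch of FOLD_INFO_MAP (for exposition only): after
   phase 2 an entry such as

     fold_mem_info *info = new fold_mem_info;
     info->added_offset = 16;
     bitmap_set_bit (info->fold_insns, INSN_UID (add_insn));
     fold_info.put (mem_insn, info);

   records that eliminating ADD_INSN requires growing MEM_INSN's offset
   by 16.  */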
/* Tracks which instructions can be reached through instructions that can
   propagate offsets for folding.  */
static bitmap_head can_fold_insns;

/* Marks instructions that are currently eligible for folding.  */
static bitmap_head candidate_fold_insns;

/* Tracks instructions that cannot be folded because it turned out that
   folding would create an invalid memory instruction.
   An instruction can be in both CANDIDATE_FOLD_INSNS and CANNOT_FOLD_INSNS
   at the same time, in which case it is not legal to fold.  */
static bitmap_head cannot_fold_insns;

/* The number of instructions that were simplified or eliminated.  */
static int stats_fold_count;
/* Get the single reaching definition of an instruction inside a BB.
   The definition is desired for REG used in INSN.
   Return the definition insn or NULL if there's no definition with
   the desired criteria.  */
static rtx_insn*
get_single_def_in_bb (rtx_insn *insn, rtx reg)
{
  df_ref use;
  struct df_link *ref_chain, *ref_link;

  FOR_EACH_INSN_USE (use, insn)
    {
      if (GET_CODE (DF_REF_REG (use)) == SUBREG)
        return NULL;
      if (REGNO (DF_REF_REG (use)) == REGNO (reg))
        break;
    }

  if (!use)
    return NULL;

  ref_chain = DF_REF_CHAIN (use);

  if (!ref_chain)
    return NULL;

  for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
    {
      /* Problem getting some definition for this instruction.  */
      if (ref_link->ref == NULL)
        return NULL;
      if (DF_REF_INSN_INFO (ref_link->ref) == NULL)
        return NULL;
      if (global_regs[REGNO (reg)]
          && !set_of (reg, DF_REF_INSN (ref_link->ref)))
        return NULL;
    }

  if (ref_chain->next)
    return NULL;

  rtx_insn *def = DF_REF_INSN (ref_chain->ref);

  if (BLOCK_FOR_INSN (def) != BLOCK_FOR_INSN (insn))
    return NULL;

  if (DF_INSN_LUID (def) > DF_INSN_LUID (insn))
    return NULL;

  return def;
}
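/* Illustrative note (not in the original sources): if REG has more than one
   reaching definition, e.g.

     x = a + 8
     if (cond)
       x = b
     ... = mem[x]

   then the use's DF_REF_CHAIN contains multiple entries and the
   REF_CHAIN->NEXT check above returns NULL, so offsets are never propagated
   through such merges.  */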
/* Get all uses of REG which is set in INSN.  Return the use list or NULL if a
   use is missing / irregular.  If SUCCESS is not NULL then set it to false if
   there are missing / irregular uses and true otherwise.  */
static struct df_link*
get_uses (rtx_insn *insn, rtx reg, bool *success)
{
  df_ref def;
  struct df_link *ref_chain, *ref_link;

  if (success)
    *success = false;

  FOR_EACH_INSN_DEF (def, insn)
    if (REGNO (DF_REF_REG (def)) == REGNO (reg))
      break;

  if (!def)
    return NULL;

  ref_chain = DF_REF_CHAIN (def);

  for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
    {
      /* Problem getting a use for this instruction.  */
      if (ref_link->ref == NULL)
        return NULL;
      if (DF_REF_CLASS (ref_link->ref) != DF_REF_REGULAR)
        return NULL;
      /* We do not handle REG_EQUIV/REG_EQ notes for now.  */
      if (DF_REF_FLAGS (ref_link->ref) & DF_REF_IN_NOTE)
        return NULL;
    }

  if (success)
    *success = true;

  return ref_chain;
}
static HOST_WIDE_INT
fold_offsets (rtx_insn *insn, rtx reg, bool analyze, bitmap foldable_insns);

/* Helper function for fold_offsets.

   If DO_RECURSION is false and ANALYZE is true this function returns true iff
   it understands the structure of INSN and knows how to propagate constants
   through it.  In this case OFFSET_OUT and FOLDABLE_INSNS are unused.

   If DO_RECURSION is true then it also calls fold_offsets for each recognized
   part of INSN with the appropriate arguments.

   If DO_RECURSION is true and ANALYZE is false then the offset that would
   result from folding is computed and is returned through the pointer
   OFFSET_OUT.  The instructions that can be folded are recorded in
   FOLDABLE_INSNS.  */
static bool
fold_offsets_1 (rtx_insn *insn, bool analyze, bool do_recursion,
                HOST_WIDE_INT *offset_out, bitmap foldable_insns)
{
  /* Doesn't make sense if both DO_RECURSION and ANALYZE are false.  */
  gcc_checking_assert (do_recursion || analyze);
  gcc_checking_assert (GET_CODE (PATTERN (insn)) == SET);

  rtx src = SET_SRC (PATTERN (insn));
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (src))
    {
    case PLUS:
      {
        /* Propagate through add.  */
        rtx arg1 = XEXP (src, 0);
        rtx arg2 = XEXP (src, 1);

        if (REG_P (arg1))
          {
            if (do_recursion)
              offset += fold_offsets (insn, arg1, analyze, foldable_insns);
          }
        else if (GET_CODE (arg1) == ASHIFT
                 && REG_P (XEXP (arg1, 0))
                 && CONST_INT_P (XEXP (arg1, 1)))
          {
            /* Handle R1 = (R2 << C) + ...  */
            if (do_recursion)
              {
                HOST_WIDE_INT scale
                  = (HOST_WIDE_INT_1U << INTVAL (XEXP (arg1, 1)));
                offset += scale * fold_offsets (insn, XEXP (arg1, 0), analyze,
                                                foldable_insns);
              }
          }
        else if (GET_CODE (arg1) == PLUS
                 && REG_P (XEXP (arg1, 0))
                 && REG_P (XEXP (arg1, 1)))
          {
            /* Handle R1 = (R2 + R3) + ...  */
            if (do_recursion)
              {
                offset += fold_offsets (insn, XEXP (arg1, 0), analyze,
                                        foldable_insns);
                offset += fold_offsets (insn, XEXP (arg1, 1), analyze,
                                        foldable_insns);
              }
          }
        else if (GET_CODE (arg1) == PLUS
                 && GET_CODE (XEXP (arg1, 0)) == ASHIFT
                 && REG_P (XEXP (XEXP (arg1, 0), 0))
                 && CONST_INT_P (XEXP (XEXP (arg1, 0), 1))
                 && REG_P (XEXP (arg1, 1)))
          {
            /* Handle R1 = ((R2 << C) + R3) + ...  */
            if (do_recursion)
              {
                HOST_WIDE_INT scale
                  = (HOST_WIDE_INT_1U << INTVAL (XEXP (XEXP (arg1, 0), 1)));
                offset += scale * fold_offsets (insn, XEXP (XEXP (arg1, 0), 0),
                                                analyze, foldable_insns);
                offset += fold_offsets (insn, XEXP (arg1, 1), analyze,
                                        foldable_insns);
              }
          }
        else
          return false;

        if (REG_P (arg2))
          {
            if (do_recursion)
              offset += fold_offsets (insn, arg2, analyze, foldable_insns);
          }
        else if (CONST_INT_P (arg2))
          {
            if (REG_P (arg1))
              {
                offset += INTVAL (arg2);
                /* This is an R1 = R2 + C instruction, candidate for
                   folding.  */
                if (!analyze)
                  bitmap_set_bit (foldable_insns, INSN_UID (insn));
              }
          }
        else
          return false;

        /* Pattern recognized for folding.  */
        break;
      }
    case MINUS:
      {
        /* Propagate through minus.  */
        rtx arg1 = XEXP (src, 0);
        rtx arg2 = XEXP (src, 1);

        if (REG_P (arg1))
          {
            if (do_recursion)
              offset += fold_offsets (insn, arg1, analyze, foldable_insns);
          }
        else
          return false;

        if (REG_P (arg2))
          {
            if (do_recursion)
              offset -= fold_offsets (insn, arg2, analyze, foldable_insns);
          }
        else if (CONST_INT_P (arg2))
          {
            if (REG_P (arg1))
              {
                offset -= INTVAL (arg2);
                /* This is an R1 = R2 - C instruction, candidate for
                   folding.  */
                if (!analyze)
                  bitmap_set_bit (foldable_insns, INSN_UID (insn));
              }
          }
        else
          return false;

        /* Pattern recognized for folding.  */
        break;
      }
    case NEG:
      {
        /* Propagate through negation.  */
        rtx arg1 = XEXP (src, 0);
        if (REG_P (arg1))
          {
            if (do_recursion)
              offset = -fold_offsets (insn, arg1, analyze, foldable_insns);
          }
        else
          return false;

        /* Pattern recognized for folding.  */
        break;
      }
    case MULT:
      {
        /* Propagate through multiply by constant.  */
        rtx arg1 = XEXP (src, 0);
        rtx arg2 = XEXP (src, 1);

        if (REG_P (arg1) && CONST_INT_P (arg2))
          {
            if (do_recursion)
              {
                HOST_WIDE_INT scale = INTVAL (arg2);
                offset = scale * fold_offsets (insn, arg1, analyze,
                                               foldable_insns);
              }
          }
        else
          return false;

        /* Pattern recognized for folding.  */
        break;
      }
    case ASHIFT:
      {
        /* Propagate through shift left by constant.  */
        rtx arg1 = XEXP (src, 0);
        rtx arg2 = XEXP (src, 1);

        if (REG_P (arg1) && CONST_INT_P (arg2))
          {
            if (do_recursion)
              {
                HOST_WIDE_INT scale = (HOST_WIDE_INT_1U << INTVAL (arg2));
                offset = scale * fold_offsets (insn, arg1, analyze,
                                               foldable_insns);
              }
          }
        else
          return false;

        /* Pattern recognized for folding.  */
        break;
      }
    case REG:
      {
        /* Propagate through register move.  */
        if (do_recursion)
          offset = fold_offsets (insn, src, analyze, foldable_insns);

        /* Pattern recognized for folding.  */
        break;
      }
    case CONST_INT:
      {
        offset = INTVAL (src);
        /* R1 = C is candidate for folding.  */
        if (!analyze)
          bitmap_set_bit (foldable_insns, INSN_UID (insn));

        /* Pattern recognized for folding.  */
        break;
      }
    default:
      /* Cannot recognize.  */
      return false;
    }

  if (do_recursion && !analyze)
    *offset_out = offset;

  return true;
}
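/* Worked example (illustrative): for the chain

     insn 1: r2 = sp + 8
     insn 2: r1 = r2 << 2
     insn 3: r0 = mem[r1]

   the ASHIFT case computes scale = 1 << 2 = 4, so folding the constant of
   insn 1 contributes 4 * 8 = 32 to the offset returned through OFFSET_OUT,
   mirroring the shl/ld transformation shown at the top of this file.  */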
/* Function that computes the offset that would have to be added to all uses
   of REG if the instructions marked in FOLDABLE_INSNS were to be eliminated.

   If ANALYZE is true then mark in CAN_FOLD_INSNS which instructions
   transitively only affect other instructions found in CAN_FOLD_INSNS.
   If ANALYZE is false then compute the offset required for folding.  */
static HOST_WIDE_INT
fold_offsets (rtx_insn *insn, rtx reg, bool analyze, bitmap foldable_insns)
{
  rtx_insn *def = get_single_def_in_bb (insn, reg);

  if (!def || GET_CODE (PATTERN (def)) != SET)
    return 0;

  rtx dest = SET_DEST (PATTERN (def));

  if (!REG_P (dest))
    return 0;

  /* We can only affect the values of GPR registers.  */
  unsigned int dest_regno = REGNO (dest);
  if (fixed_regs[dest_regno]
      || !TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], dest_regno))
    return 0;

  if (analyze)
    {
      /* Check if we know how to handle DEF.  */
      if (!fold_offsets_1 (def, true, false, NULL, NULL))
        return 0;

      /* We only fold through instructions that are transitively used as
         memory addresses and do not have other uses.  Use the same logic
         from offset calculation to visit instructions that can propagate
         offsets and keep track of them in CAN_FOLD_INSNS.  */
      bool success;
      struct df_link *uses = get_uses (def, dest, &success), *ref_link;

      if (!success)
        return 0;

      for (ref_link = uses; ref_link; ref_link = ref_link->next)
        {
          rtx_insn *use = DF_REF_INSN (ref_link->ref);

          if (DEBUG_INSN_P (use))
            continue;

          /* Punt if the use is anything more complicated than a set
             (clobber, use, etc).  */
          if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != SET)
            return 0;

          /* This use affects instructions outside of CAN_FOLD_INSNS.  */
          if (!bitmap_bit_p (&can_fold_insns, INSN_UID (use)))
            return 0;

          rtx use_set = PATTERN (use);

          /* Special case: A foldable memory store is not foldable if it
             mentions DEST outside of the address calculation.  */
          if (use_set && MEM_P (SET_DEST (use_set))
              && reg_mentioned_p (dest, SET_SRC (use_set)))
            return 0;
        }

      bitmap_set_bit (&can_fold_insns, INSN_UID (def));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Instruction marked for propagation: ");
          print_rtl_single (dump_file, def);
        }
    }
  else
    {
      /* We cannot propagate through this instruction.  */
      if (!bitmap_bit_p (&can_fold_insns, INSN_UID (def)))
        return 0;
    }

  HOST_WIDE_INT offset = 0;
  bool recognized = fold_offsets_1 (def, analyze, true, &offset,
                                    foldable_insns);

  if (!recognized)
    return 0;

  return offset;
}
/* Test if INSN is a memory load / store that can have an offset folded to it.
   Return true iff INSN is such an instruction and return through MEM_OUT,
   REG_OUT and OFFSET_OUT the RTX that has a MEM code, the register that is
   used as a base address and the offset, respectively.
   All of the out pointers may be NULL in which case they will be ignored.  */
bool
get_fold_mem_root (rtx_insn *insn, rtx *mem_out, rtx *reg_out,
                   HOST_WIDE_INT *offset_out)
{
  rtx set = single_set (insn);
  rtx mem = NULL_RTX;

  if (set != NULL_RTX)
    {
      rtx src = SET_SRC (set);
      rtx dest = SET_DEST (set);

      /* Don't fold when we have unspec / volatile.  */
      if (GET_CODE (src) == UNSPEC
          || GET_CODE (src) == UNSPEC_VOLATILE
          || GET_CODE (dest) == UNSPEC
          || GET_CODE (dest) == UNSPEC_VOLATILE)
        return false;

      if (MEM_P (src))
        mem = src;
      else if (MEM_P (dest))
        mem = dest;
      else if ((GET_CODE (src) == SIGN_EXTEND
                || GET_CODE (src) == ZERO_EXTEND)
               && MEM_P (XEXP (src, 0)))
        mem = XEXP (src, 0);
    }

  if (mem == NULL_RTX)
    return false;

  rtx mem_addr = XEXP (mem, 0);
  rtx reg;
  HOST_WIDE_INT offset;

  if (REG_P (mem_addr))
    {
      reg = mem_addr;
      offset = 0;
    }
  else if (GET_CODE (mem_addr) == PLUS
           && REG_P (XEXP (mem_addr, 0))
           && CONST_INT_P (XEXP (mem_addr, 1)))
    {
      reg = XEXP (mem_addr, 0);
      offset = INTVAL (XEXP (mem_addr, 1));
    }
  else
    return false;

  if (mem_out)
    *mem_out = mem;
  if (reg_out)
    *reg_out = reg;
  if (offset_out)
    *offset_out = offset;

  return true;
}
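/* The recognized fold roots therefore look like (illustrative RTL):

     (set (reg:DI r0) (mem:DI (reg:DI base)))                         ; load
     (set (mem:DI (plus:DI (reg:DI base) (const_int 8))) (reg:DI r1)) ; store
     (set (reg:DI r0) (zero_extend:DI (mem:SI (reg:DI base))))        ; extend

   Anything else, e.g. auto-increment addresses or a sum of two registers,
   is not treated as a fold root.  */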
/* If INSN is a root memory instruction then do a DFS traversal on its
   definitions and find folding candidates.  */
static void
do_analysis (rtx_insn *insn)
{
  rtx reg;
  if (!get_fold_mem_root (insn, NULL, &reg, NULL))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting analysis from root: ");
      print_rtl_single (dump_file, insn);
    }

  /* Analyse folding opportunities for this memory instruction.  */
  bitmap_set_bit (&can_fold_insns, INSN_UID (insn));
  fold_offsets (insn, reg, true, NULL);
}
static void
do_fold_info_calculation (rtx_insn *insn, fold_info_map *fold_info)
{
  rtx mem, reg;
  HOST_WIDE_INT cur_offset;
  if (!get_fold_mem_root (insn, &mem, &reg, &cur_offset))
    return;

  fold_mem_info *info = new fold_mem_info;
  info->added_offset = fold_offsets (insn, reg, false, info->fold_insns);

  fold_info->put (insn, info);
}
/* If INSN is a root memory instruction then compute a potentially new offset
   for it and test if the resulting instruction is valid.  */
static void
do_check_validity (rtx_insn *insn, fold_mem_info *info)
{
  rtx mem, reg;
  HOST_WIDE_INT cur_offset;
  if (!get_fold_mem_root (insn, &mem, &reg, &cur_offset))
    return;

  HOST_WIDE_INT new_offset = cur_offset + info->added_offset;

  /* Test if it is valid to change MEM's address offset to NEW_OFFSET.  */
  int icode = INSN_CODE (insn);
  INSN_CODE (insn) = -1;
  rtx mem_addr = XEXP (mem, 0);
  machine_mode mode = GET_MODE (mem_addr);
  if (new_offset != 0)
    XEXP (mem, 0) = gen_rtx_PLUS (mode, reg, gen_int_mode (new_offset, mode));
  else
    XEXP (mem, 0) = reg;

  bool illegal = insn_invalid_p (insn, false)
                 || !memory_address_addr_space_p (mode, XEXP (mem, 0),
                                                  MEM_ADDR_SPACE (mem));

  /* Restore the instruction.  */
  XEXP (mem, 0) = mem_addr;
  INSN_CODE (insn) = icode;

  if (illegal)
    bitmap_ior_into (&cannot_fold_insns, info->fold_insns);
  else
    bitmap_ior_into (&candidate_fold_insns, info->fold_insns);
}
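/* Target-specific example (illustrative): on RISC-V a load/store immediate
   must fit in a signed 12-bit field, so a NEW_OFFSET of e.g. 4096 fails the
   insn_invalid_p / memory_address_addr_space_p checks above and all the
   folds feeding it land in CANNOT_FOLD_INSNS.  */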
static bool
compute_validity_closure (fold_info_map *fold_info)
{
  /* Let's say we have an arbitrary chain of foldable instructions xN = xN + C
     and memory operations rN that use xN as shown below.  If folding x1 in r1
     turns out to be invalid for whatever reason then it's also invalid to
     fold any of the other xN into any rN.  That means that we need the
     transitive closure of validity to determine whether we can fold a xN
     instruction.

     +--------------+    +-------------------+    +-------------------+
     | r1 = mem[x1] |    | r2 = mem[x1 + x2] |    | r3 = mem[x2 + x3] | ...
     +--------------+    +-------------------+    +-------------------+
            ^               ^        ^               ^        ^
            |              /         |              /         | ...
            |             /          |             /          |
     +-------------+     /    +-------------+     /    +-------------+
     | x1 = x1 + 1 |----+     | x2 = x2 + 1 |----+     | x3 = x3 + 1 |--- ...
     +-------------+          +-------------+          +-------------+
            ^                        ^                        ^
            |                        |                        |
           ...                      ...                      ...
  */

  /* In general three iterations should be enough for most cases, but allow up
     to five when -fexpensive-optimizations is used.  */
  int max_iters = 3 + 2 * flag_expensive_optimizations;
  for (int pass = 0; pass < max_iters; pass++)
    {
      bool made_changes = false;
      for (fold_info_map::iterator iter = fold_info->begin ();
           iter != fold_info->end (); ++iter)
        {
          fold_mem_info *info = (*iter).second;
          if (bitmap_intersect_p (&cannot_fold_insns, info->fold_insns))
            made_changes |= bitmap_ior_into (&cannot_fold_insns,
                                             info->fold_insns);
        }

      if (!made_changes)
        return true;
    }

  return false;
}
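/* Illustrative note: with -fexpensive-optimizations (flag value 1) the loop
   above runs at most 3 + 2 * 1 = 5 passes.  Each pass spreads invalidity one
   step further along overlapping chains like the diagram above, and the loop
   exits early once a full sweep makes no change.  */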
/* If INSN is a root memory instruction that was affected by any folding
   then update its offset as necessary.  */
static void
do_commit_offset (rtx_insn *insn, fold_mem_info *info)
{
  rtx mem, reg;
  HOST_WIDE_INT cur_offset;
  if (!get_fold_mem_root (insn, &mem, &reg, &cur_offset))
    return;

  HOST_WIDE_INT new_offset = cur_offset + info->added_offset;

  if (new_offset == cur_offset)
    return;

  gcc_assert (!bitmap_empty_p (info->fold_insns));

  if (bitmap_intersect_p (&cannot_fold_insns, info->fold_insns))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "Memory offset changed from "
               HOST_WIDE_INT_PRINT_DEC " to " HOST_WIDE_INT_PRINT_DEC
               " for instruction:\n", cur_offset, new_offset);
      print_rtl_single (dump_file, insn);
    }

  machine_mode mode = GET_MODE (XEXP (mem, 0));
  if (new_offset != 0)
    XEXP (mem, 0) = gen_rtx_PLUS (mode, reg, gen_int_mode (new_offset, mode));
  else
    XEXP (mem, 0) = reg;
  INSN_CODE (insn) = recog (PATTERN (insn), insn, 0);
  df_insn_rescan (insn);
}
/* If INSN is a move / add instruction that was folded then replace its
   constant part with zero.  */
static void
do_commit_insn (rtx_insn *insn)
{
  if (bitmap_bit_p (&candidate_fold_insns, INSN_UID (insn))
      && !bitmap_bit_p (&cannot_fold_insns, INSN_UID (insn)))
    {
      if (dump_file)
        {
          fprintf (dump_file, "Instruction folded:");
          print_rtl_single (dump_file, insn);
        }

      stats_fold_count++;

      rtx set = single_set (insn);
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);

      /* Emit a move and let subsequent passes eliminate it if possible.  */
      if (GET_CODE (src) == CONST_INT)
        {
          /* INSN is R1 = C.
             Replace it with R1 = 0 because C was folded.  */
          rtx mov_rtx
            = gen_move_insn (dest, gen_int_mode (0, GET_MODE (dest)));
          df_insn_rescan (emit_insn_after (mov_rtx, insn));
        }
      else
        {
          /* INSN is R1 = R2 + C.
             Replace it with R1 = R2 because C was folded.  */
          rtx arg1 = XEXP (src, 0);

          /* If DEST == ARG1 then the move is a no-op.  */
          if (REGNO (dest) != REGNO (arg1))
            {
              gcc_checking_assert (GET_MODE (dest) == GET_MODE (arg1));
              rtx mov_rtx = gen_move_insn (dest, arg1);
              df_insn_rescan (emit_insn_after (mov_rtx, insn));
            }
        }

      /* Delete the original move / add instruction.  */
      delete_insn (insn);
    }
}
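/* Sketch of the rewrite performed above (illustrative): a folded
   "r1 = r2 + 16" becomes the move "r1 = r2" (or is simply deleted when
   r1 == r2), and a folded "r1 = 16" becomes "r1 = 0"; subsequent passes
   are expected to eliminate the remaining moves.  */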
unsigned int
pass_fold_mem_offsets::execute (function *fn)
{
  df_set_flags (DF_EQ_NOTES + DF_RD_PRUNE_DEAD_DEFS + DF_DEFER_INSN_RESCAN);
  df_chain_add_problem (DF_UD_CHAIN + DF_DU_CHAIN);
  df_analyze ();

  bitmap_initialize (&can_fold_insns, NULL);
  bitmap_initialize (&candidate_fold_insns, NULL);
  bitmap_initialize (&cannot_fold_insns, NULL);

  stats_fold_count = 0;

  basic_block bb;
  rtx_insn *insn;
  FOR_ALL_BB_FN (bb, fn)
    {
      /* There is a conflict between this pass and RISCV's shorten-memrefs
         pass.  For now disable folding if optimizing for size because
         otherwise this cancels the effects of shorten-memrefs.  */
      if (optimize_bb_for_size_p (bb))
        continue;

      fold_info_map fold_info;

      bitmap_clear (&can_fold_insns);
      bitmap_clear (&candidate_fold_insns);
      bitmap_clear (&cannot_fold_insns);

      FOR_BB_INSNS (bb, insn)
        do_analysis (insn);

      FOR_BB_INSNS (bb, insn)
        do_fold_info_calculation (insn, &fold_info);

      FOR_BB_INSNS (bb, insn)
        if (fold_mem_info **info = fold_info.get (insn))
          do_check_validity (insn, *info);

      if (compute_validity_closure (&fold_info))
        {
          FOR_BB_INSNS (bb, insn)
            if (fold_mem_info **info = fold_info.get (insn))
              do_commit_offset (insn, *info);

          FOR_BB_INSNS (bb, insn)
            do_commit_insn (insn);
        }

      for (fold_info_map::iterator iter = fold_info.begin ();
           iter != fold_info.end (); ++iter)
        delete (*iter).second;
    }

  statistics_counter_event (cfun, "Number of folded instructions",
                            stats_fold_count);

  bitmap_release (&can_fold_insns);
  bitmap_release (&candidate_fold_insns);
  bitmap_release (&cannot_fold_insns);

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_fold_mem_offsets (gcc::context *ctxt)
{
  return new pass_fold_mem_offsets (ctxt);
}