1 /* Late RTL pass to fold memory offsets.
2 Copyright (C) 2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
32 #include "insn-config.h"
36 #include "tree-pass.h"
39 /* This pass tries to optimize memory offset calculations by moving constants
40 from add instructions to the memory instructions (loads / stores).
41 For example it can transform code like this:
50 into the following (one instruction less):
58 Although the previous passes try to emit efficient offset calculations
59 this pass is still beneficial because:
61 - The mechanisms that optimize memory offsets usually work with specific
62 patterns or have limitations. This pass is designed to fold offsets
63 through complex calculations that affect multiple memory operations
64 and have partially overlapping calculations.
66 - There are cases where add instructions are introduced in late rtl passes
67 and the rest of the pipeline cannot eliminate them. Arrays and structs
68 allocated on the stack can result in unwanted add instructions that
69 cannot be eliminated easily.
71 This pass works on a basic block level and consists of 4 phases:
73 - Phase 1 (Analysis): Find "foldable" instructions.
74 Foldable instructions are those that we know how to propagate
75 a constant addition through (add, shift, move, ...) and only have other
76 foldable instructions for uses. In that phase a DFS traversal on the
77 definition tree is performed and foldable instructions are marked on
78 a bitmap. The add immediate instructions that are reachable in this
79 DFS are candidates for folding since all the intermediate calculations
80 affected by them are also foldable.
82 - Phase 2 (Validity): Traverse and calculate the offsets that would result
83 from folding the add immediate instructions. Check whether the
84 calculated offsets result in a valid instruction for the target.
86 - Phase 3 (Commit offsets): Traverse again. It is now known which folds
87 are valid so at this point change the offsets in the memory instructions.
89 - Phase 4 (Commit instruction deletions): Scan all instructions and delete
90 or simplify (reduce to move) all add immediate instructions that were folded.
93 This pass should run before hard register propagation because it creates
94 register moves that we expect to be eliminated. */
/* Pass metadata for the fold_mem_offsets RTL pass.  TODO_df_finish in
   todo_flags_finish asks the pass manager to finalize dataflow after the
   pass runs.  NOTE(review): extraction has dropped several initializer
   lines (e.g. the pass type and tv_id fields); fragments kept verbatim.  */
98 const pass_data pass_data_fold_mem
=
101 "fold_mem_offsets", /* name */
102 OPTGROUP_NONE
, /* optinfo_flags */
104 0, /* properties_required */
105 0, /* properties_provided */
106 0, /* properties_destroyed */
107 0, /* todo_flags_start */
108 TODO_df_finish
, /* todo_flags_finish */
/* Pass object wrapping the fold_mem_offsets pass.  NOTE(review):
   extraction has dropped braces and some lines; fragments kept verbatim.  */
111 class pass_fold_mem_offsets
: public rtl_opt_pass
114 pass_fold_mem_offsets (gcc::context
*ctxt
)
115 : rtl_opt_pass (pass_data_fold_mem
, ctxt
)
118 /* opt_pass methods: */
119 virtual bool gate (function
*)
/* Gate: only run when -ffold-mem-offsets is enabled and optimizing at
   -O2 or higher.  */
121 return flag_fold_mem_offsets
&& optimize
>= 2;
124 virtual unsigned int execute (function
*);
125 }; // class pass_fold_mem_offsets
127 /* Class that holds in FOLD_INSNS the instructions that if folded the offset
128 of a memory instruction would increase by ADDED_OFFSET. */
129 class fold_mem_info
{
/* Set of INSN_UIDs of the add/move instructions feeding this memory insn.  */
131 auto_bitmap fold_insns
;
/* Offset that would be added to the memory insn if FOLD_INSNS are folded.  */
132 HOST_WIDE_INT added_offset
;
/* Map from a root memory instruction to its folding information.  */
135 typedef hash_map
<rtx_insn
*, fold_mem_info
*> fold_info_map
;
/* File-scope pass state.  These bitmaps are keyed by INSN_UID and are
   cleared per basic block inside pass_fold_mem_offsets::execute.  */
137 /* Tracks which instructions can be reached through instructions that can
138 propagate offsets for folding. */
139 static bitmap_head can_fold_insns
;
141 /* Marks instructions that are currently eligible for folding. */
142 static bitmap_head candidate_fold_insns
;
144 /* Tracks instructions that cannot be folded because it turned out that
145 folding will result in creating an invalid memory instruction.
146 An instruction can be in both CANDIDATE_FOLD_INSNS and CANNOT_FOLD_INSNS
147 at the same time, in which case it is not legal to fold. */
148 static bitmap_head cannot_fold_insns
;
150 /* The number of instructions that were simplified or eliminated. */
151 static int stats_fold_count
;
153 /* Get the single reaching definition of an instruction inside a BB.
154 The definition is desired for REG used in INSN.
155 Return the definition insn or NULL if there's no definition with
156 the desired criteria. */
/* NOTE(review): extraction has dropped interior lines from this function
   (the return type, braces, the early "return NULL" bodies of the guard
   conditions and the final return).  Fragments below kept verbatim.  */
158 get_single_def_in_bb (rtx_insn
*insn
, rtx reg
)
161 struct df_link
*ref_chain
, *ref_link
;
/* Locate the df use of REG in INSN to obtain its def chain.  */
163 FOR_EACH_INSN_USE (use
, insn
)
165 if (GET_CODE (DF_REF_REG (use
)) == SUBREG
)
167 if (REGNO (DF_REF_REG (use
)) == REGNO (reg
))
174 ref_chain
= DF_REF_CHAIN (use
);
179 for (ref_link
= ref_chain
; ref_link
; ref_link
= ref_link
->next
)
181 /* Problem getting some definition for this instruction. */
182 if (ref_link
->ref
== NULL
)
184 if (DF_REF_INSN_INFO (ref_link
->ref
) == NULL
)
/* Global registers may be clobbered by calls; presumably entries whose
   insn does not explicitly set REG are rejected here — TODO confirm
   against upstream.  */
186 if (global_regs
[REGNO (reg
)]
187 && !set_of (reg
, DF_REF_INSN (ref_link
->ref
)))
194 rtx_insn
*def
= DF_REF_INSN (ref_chain
->ref
);
/* Require the def to be in the same BB and to precede INSN.  */
196 if (BLOCK_FOR_INSN (def
) != BLOCK_FOR_INSN (insn
))
199 if (DF_INSN_LUID (def
) > DF_INSN_LUID (insn
))
205 /* Get all uses of REG which is set in INSN. Return the use list or NULL if a
206 use is missing / irregular. If SUCCESS is not NULL then set it to false if
207 there are missing / irregular uses and true otherwise. */
/* NOTE(review): extraction has dropped interior lines (return type, braces,
   early-return bodies and the final return).  Fragments kept verbatim.  */
209 get_uses (rtx_insn
*insn
, rtx reg
, bool *success
)
/* Find the df def of REG in INSN to obtain its use chain.  */
216 FOR_EACH_INSN_DEF (def
, insn
)
217 if (REGNO (DF_REF_REG (def
)) == REGNO (reg
))
223 df_link
*ref_chain
= DF_REF_CHAIN (def
);
224 int insn_luid
= DF_INSN_LUID (insn
);
225 basic_block insn_bb
= BLOCK_FOR_INSN (insn
);
/* Validate every use: all must be regular, non-debug, in the same BB and
   after INSN in insn order.  */
227 for (df_link
*ref_link
= ref_chain
; ref_link
; ref_link
= ref_link
->next
)
229 /* Problem getting a use for this instruction. */
230 if (ref_link
->ref
== NULL
)
232 if (DF_REF_CLASS (ref_link
->ref
) != DF_REF_REGULAR
)
235 rtx_insn
*use
= DF_REF_INSN (ref_link
->ref
);
236 if (DEBUG_INSN_P (use
))
239 /* We do not handle REG_EQUIV/REG_EQ notes for now. */
240 if (DF_REF_FLAGS (ref_link
->ref
) & DF_REF_IN_NOTE
)
242 if (BLOCK_FOR_INSN (use
) != insn_bb
)
244 /* Punt if use appears before def in the basic block. See PR111601. */
245 if (DF_INSN_LUID (use
) < insn_luid
)
/* Forward declaration of fold_offsets (mutually recursive with
   fold_offsets_1 below).  NOTE(review): return type dropped by
   extraction.  */
256 fold_offsets (rtx_insn
*insn
, rtx reg
, bool analyze
, bitmap foldable_insns
);
258 /* Helper function for fold_offsets.
260 If DO_RECURSION is false and ANALYZE is true this function returns true iff
261 it understands the structure of INSN and knows how to propagate constants
262 through it. In this case OFFSET_OUT and FOLDABLE_INSNS are unused.
264 If DO_RECURSION is true then it also calls fold_offsets for each recognized
265 part of INSN with the appropriate arguments.
267 If DO_RECURSION is true and ANALYZE is false then offset that would result
268 from folding is computed and is returned through the pointer OFFSET_OUT.
269 The instructions that can be folded are recorded in FOLDABLE_INSNS. */
/* NOTE(review): extraction has dropped the switch's case labels
   (presumably PLUS, MINUS, NEG, MULT, ASHIFT, REG, CONST_INT — confirm
   against upstream), braces, several "if (do_recursion)" guards and
   return statements.  Fragments below kept verbatim.  */
271 fold_offsets_1 (rtx_insn
*insn
, bool analyze
, bool do_recursion
,
272 HOST_WIDE_INT
*offset_out
, bitmap foldable_insns
)
274 /* Doesn't make sense if both DO_RECURSION and ANALYZE are false. */
275 gcc_checking_assert (do_recursion
|| analyze
);
276 gcc_checking_assert (GET_CODE (PATTERN (insn
)) == SET
);
278 rtx src
= SET_SRC (PATTERN (insn
));
279 HOST_WIDE_INT offset
= 0;
/* Dispatch on the shape of the SET_SRC expression.  */
281 switch (GET_CODE (src
))
285 /* Propagate through add. */
286 rtx arg1
= XEXP (src
, 0);
287 rtx arg2
= XEXP (src
, 1);
292 offset
+= fold_offsets (insn
, arg1
, analyze
, foldable_insns
);
294 else if (GET_CODE (arg1
) == ASHIFT
295 && REG_P (XEXP (arg1
, 0))
296 && CONST_INT_P (XEXP (arg1
, 1)))
298 /* Handle R1 = (R2 << C) + ... */
/* The shift scales the propagated offset by 2^C.  */
302 = (HOST_WIDE_INT_1U
<< INTVAL (XEXP (arg1
, 1)));
303 offset
+= scale
* fold_offsets (insn
, XEXP (arg1
, 0), analyze
,
307 else if (GET_CODE (arg1
) == PLUS
308 && REG_P (XEXP (arg1
, 0))
309 && REG_P (XEXP (arg1
, 1)))
311 /* Handle R1 = (R2 + R3) + ... */
314 offset
+= fold_offsets (insn
, XEXP (arg1
, 0), analyze
,
316 offset
+= fold_offsets (insn
, XEXP (arg1
, 1), analyze
,
320 else if (GET_CODE (arg1
) == PLUS
321 && GET_CODE (XEXP (arg1
, 0)) == ASHIFT
322 && REG_P (XEXP (XEXP (arg1
, 0), 0))
323 && CONST_INT_P (XEXP (XEXP (arg1
, 0), 1))
324 && REG_P (XEXP (arg1
, 1)))
326 /* Handle R1 = ((R2 << C) + R3) + ... */
330 = (HOST_WIDE_INT_1U
<< INTVAL (XEXP (XEXP (arg1
, 0), 1)));
331 offset
+= scale
* fold_offsets (insn
, XEXP (XEXP (arg1
, 0), 0),
332 analyze
, foldable_insns
);
333 offset
+= fold_offsets (insn
, XEXP (arg1
, 1), analyze
,
343 offset
+= fold_offsets (insn
, arg2
, analyze
, foldable_insns
);
345 else if (CONST_INT_P (arg2
))
349 offset
+= INTVAL (arg2
);
350 /* This is a R1 = R2 + C instruction, candidate for folding. */
352 bitmap_set_bit (foldable_insns
, INSN_UID (insn
));
358 /* Pattern recognized for folding. */
363 /* Propagate through minus. */
364 rtx arg1
= XEXP (src
, 0);
365 rtx arg2
= XEXP (src
, 1);
370 offset
+= fold_offsets (insn
, arg1
, analyze
, foldable_insns
);
/* The subtrahend contributes with a negated sign.  */
378 offset
-= fold_offsets (insn
, arg2
, analyze
, foldable_insns
);
380 else if (CONST_INT_P (arg2
))
384 offset
-= INTVAL (arg2
);
385 /* This is a R1 = R2 - C instruction, candidate for folding. */
387 bitmap_set_bit (foldable_insns
, INSN_UID (insn
));
393 /* Pattern recognized for folding. */
398 /* Propagate through negation. */
399 rtx arg1
= XEXP (src
, 0);
403 offset
= -fold_offsets (insn
, arg1
, analyze
, foldable_insns
);
408 /* Pattern recognized for folding. */
413 /* Propagate through multiply by constant. */
414 rtx arg1
= XEXP (src
, 0);
415 rtx arg2
= XEXP (src
, 1);
417 if (REG_P (arg1
) && CONST_INT_P (arg2
))
421 HOST_WIDE_INT scale
= INTVAL (arg2
);
422 offset
= scale
* fold_offsets (insn
, arg1
, analyze
,
429 /* Pattern recognized for folding. */
434 /* Propagate through shift left by constant. */
435 rtx arg1
= XEXP (src
, 0);
436 rtx arg2
= XEXP (src
, 1);
438 if (REG_P (arg1
) && CONST_INT_P (arg2
))
442 HOST_WIDE_INT scale
= (HOST_WIDE_INT_1U
<< INTVAL (arg2
));
443 offset
= scale
* fold_offsets (insn
, arg1
, analyze
,
450 /* Pattern recognized for folding. */
455 /* Propagate through register move. */
457 offset
= fold_offsets (insn
, src
, analyze
, foldable_insns
);
459 /* Pattern recognized for folding. */
/* CONST_INT source: the constant itself is the propagated offset.  */
464 offset
= INTVAL (src
);
465 /* R1 = C is candidate for folding. */
467 bitmap_set_bit (foldable_insns
, INSN_UID (insn
));
469 /* Pattern recognized for folding. */
473 /* Cannot recognize. */
/* Report the accumulated offset only in the compute (non-analyze) mode.  */
477 if (do_recursion
&& !analyze
)
478 *offset_out
= offset
;
483 /* Function that computes the offset that would have to be added to all uses
484 of REG if the instructions marked in FOLDABLE_INSNS were to be eliminated.
486 If ANALYZE is true then mark in CAN_FOLD_INSNS which instructions
487 transitively only affect other instructions found in CAN_FOLD_INSNS.
488 If ANALYZE is false then compute the offset required for folding. */
/* NOTE(review): extraction has dropped the return type, braces, early
   "return 0" bodies and the final return.  Fragments kept verbatim.  */
490 fold_offsets (rtx_insn
*insn
, rtx reg
, bool analyze
, bitmap foldable_insns
)
/* REG must have a single in-BB reaching definition that is a plain SET.  */
492 rtx_insn
*def
= get_single_def_in_bb (insn
, reg
);
494 if (!def
|| GET_CODE (PATTERN (def
)) != SET
)
497 rtx dest
= SET_DEST (PATTERN (def
));
502 /* We can only affect the values of GPR registers. */
503 unsigned int dest_regno
= REGNO (dest
);
504 if (fixed_regs
[dest_regno
]
505 || !TEST_HARD_REG_BIT (reg_class_contents
[GENERAL_REGS
], dest_regno
))
510 /* Check if we know how to handle DEF. */
511 if (!fold_offsets_1 (def
, true, false, NULL
, NULL
))
514 /* We only fold through instructions that are transitively used as
515 memory addresses and do not have other uses. Use the same logic
516 from offset calculation to visit instructions that can propagate
517 offsets and keep track of them in CAN_FOLD_INSNS. */
519 struct df_link
*uses
= get_uses (def
, dest
, &success
), *ref_link
;
524 for (ref_link
= uses
; ref_link
; ref_link
= ref_link
->next
)
526 rtx_insn
*use
= DF_REF_INSN (ref_link
->ref
);
528 if (DEBUG_INSN_P (use
))
531 /* Punt if the use is anything more complicated than a set
532 (clobber, use, etc). */
533 if (!NONJUMP_INSN_P (use
) || GET_CODE (PATTERN (use
)) != SET
)
536 /* This use affects instructions outside of CAN_FOLD_INSNS. */
537 if (!bitmap_bit_p (&can_fold_insns
, INSN_UID (use
)))
540 rtx use_set
= PATTERN (use
);
542 /* Special case: A foldable memory store is not foldable if it
543 mentions DEST outside of the address calculation. */
544 if (use_set
&& MEM_P (SET_DEST (use_set
))
545 && reg_mentioned_p (dest
, SET_SRC (use_set
)))
/* All uses check out: DEF can participate in offset propagation.  */
549 bitmap_set_bit (&can_fold_insns
, INSN_UID (def
));
551 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
553 fprintf (dump_file
, "Instruction marked for propagation: ");
554 print_rtl_single (dump_file
, def
);
559 /* We cannot propagate through this instruction. */
560 if (!bitmap_bit_p (&can_fold_insns
, INSN_UID (def
)))
/* Compute mode: recurse through DEF and collect the resulting offset.  */
564 HOST_WIDE_INT offset
= 0;
565 bool recognized
= fold_offsets_1 (def
, analyze
, true, &offset
,
574 /* Test if INSN is a memory load / store that can have an offset folded to it.
575 Return true iff INSN is such an instruction and return through MEM_OUT,
576 REG_OUT and OFFSET_OUT the RTX that has a MEM code, the register that is
577 used as a base address and the offset accordingly.
578 All of the out pointers may be NULL in which case they will be ignored. */
/* NOTE(review): extraction has dropped the return type, braces, the
   assignments to the local MEM, several "return false"/"return true"
   statements and the writes to MEM_OUT/REG_OUT.  Fragments kept
   verbatim.  */
580 get_fold_mem_root (rtx_insn
*insn
, rtx
*mem_out
, rtx
*reg_out
,
581 HOST_WIDE_INT
*offset_out
)
583 rtx set
= single_set (insn
);
588 rtx src
= SET_SRC (set
);
589 rtx dest
= SET_DEST (set
);
591 /* Don't fold when we have unspec / volatile. */
592 if (GET_CODE (src
) == UNSPEC
593 || GET_CODE (src
) == UNSPEC_VOLATILE
594 || GET_CODE (dest
) == UNSPEC
595 || GET_CODE (dest
) == UNSPEC_VOLATILE
)
/* A store has MEM as the destination; a (possibly extended) load has MEM
   under an optional SIGN_EXTEND/ZERO_EXTEND on the source side.  */
600 else if (MEM_P (dest
))
602 else if ((GET_CODE (src
) == SIGN_EXTEND
603 || GET_CODE (src
) == ZERO_EXTEND
)
604 && MEM_P (XEXP (src
, 0)))
611 rtx mem_addr
= XEXP (mem
, 0);
613 HOST_WIDE_INT offset
;
/* Accept a bare base register (offset 0) or base-plus-constant form.  */
615 if (REG_P (mem_addr
))
620 else if (GET_CODE (mem_addr
) == PLUS
621 && REG_P (XEXP (mem_addr
, 0))
622 && CONST_INT_P (XEXP (mem_addr
, 1)))
624 reg
= XEXP (mem_addr
, 0);
625 offset
= INTVAL (XEXP (mem_addr
, 1));
635 *offset_out
= offset
;
640 /* If INSN is a root memory instruction then do a DFS traversal on its
641 definitions and find folding candidates. */
/* NOTE(review): extraction has dropped the return type, braces and the
   local REG declaration.  The '®' two lines below is almost certainly a
   mis-encoded '&reg' argument — TODO restore from upstream.  */
643 do_analysis (rtx_insn
*insn
)
646 if (!get_fold_mem_root (insn
, NULL
, ®
, NULL
))
649 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
651 fprintf (dump_file
, "Starting analysis from root: ");
652 print_rtl_single (dump_file
, insn
);
655 /* Analyse folding opportunities for this memory instruction. */
656 bitmap_set_bit (&can_fold_insns
, INSN_UID (insn
));
657 fold_offsets (insn
, reg
, true, NULL
);
/* For a root memory instruction INSN, allocate a fold_mem_info, compute the
   offset that folding would add, and record it in FOLD_INFO.
   NOTE(review): extraction has dropped the return type, braces and the
   MEM/REG locals; '®' below is likely a mis-encoded '&reg'.  The
   fold_mem_info allocated with 'new' is released in execute()'s cleanup
   loop over the map.  */
661 do_fold_info_calculation (rtx_insn
*insn
, fold_info_map
*fold_info
)
664 HOST_WIDE_INT cur_offset
;
665 if (!get_fold_mem_root (insn
, &mem
, ®
, &cur_offset
))
668 fold_mem_info
*info
= new fold_mem_info
;
669 info
->added_offset
= fold_offsets (insn
, reg
, false, info
->fold_insns
);
671 fold_info
->put (insn
, info
);
674 /* If INSN is a root memory instruction then compute a potentially new offset
675 for it and test if the resulting instruction is valid. */
/* NOTE(review): extraction has dropped the return type, braces, MEM/REG
   locals and the conditional around the two bitmap_ior_into calls (the
   first presumably runs when ILLEGAL is true, the second otherwise —
   confirm against upstream); '®' below is likely a mis-encoded '&reg'.  */
677 do_check_validity (rtx_insn
*insn
, fold_mem_info
*info
)
680 HOST_WIDE_INT cur_offset
;
681 if (!get_fold_mem_root (insn
, &mem
, ®
, &cur_offset
))
684 HOST_WIDE_INT new_offset
= cur_offset
+ info
->added_offset
;
686 /* Test if it is valid to change MEM's address offset to NEW_OFFSET. */
/* Temporarily invalidate INSN_CODE so the in-place address change is
   re-recognized, then restore both the address and the code below.  */
687 int icode
= INSN_CODE (insn
);
688 INSN_CODE (insn
) = -1;
689 rtx mem_addr
= XEXP (mem
, 0);
690 machine_mode mode
= GET_MODE (mem_addr
);
692 XEXP (mem
, 0) = gen_rtx_PLUS (mode
, reg
, gen_int_mode (new_offset
, mode
));
696 bool illegal
= insn_invalid_p (insn
, false)
697 || !memory_address_addr_space_p (mode
, XEXP (mem
, 0),
698 MEM_ADDR_SPACE (mem
));
700 /* Restore the instruction. */
701 XEXP (mem
, 0) = mem_addr
;
702 INSN_CODE (insn
) = icode
;
705 bitmap_ior_into (&cannot_fold_insns
, info
->fold_insns
);
707 bitmap_ior_into (&candidate_fold_insns
, info
->fold_insns
);
/* Propagate fold-invalidity transitively across overlapping FOLD_INSNS sets
   in FOLD_INFO (see the diagram below).  NOTE(review): extraction has
   dropped the return type, braces, part of the diagram, the second
   bitmap_ior_into argument and the convergence/return logic.  Fragments
   kept verbatim.  */
711 compute_validity_closure (fold_info_map
*fold_info
)
713 /* Let's say we have an arbitrary chain of foldable instructions xN = xN + C
714 and memory operations rN that use xN as shown below. If folding x1 in r1
715 turns out to be invalid for whatever reason then it's also invalid to fold
716 any of the other xN into any rN. That means that we need the transitive
717 closure of validity to determine whether we can fold a xN instruction.
719 +--------------+ +-------------------+ +-------------------+
720 | r1 = mem[x1] | | r2 = mem[x1 + x2] | | r3 = mem[x2 + x3] | ...
721 +--------------+ +-------------------+ +-------------------+
725 +-------------+ / +-------------+ / +-------------+
726 | x1 = x1 + 1 |-----+ | x2 = x2 + 1 |-----+ | x3 = x3 + 1 |--- ...
727 +-------------+ +-------------+ +-------------+
733 /* In general three iterations should be enough for most cases, but allow up
734 to five when -fexpensive-optimizations is used. */
735 int max_iters
= 3 + 2 * flag_expensive_optimizations
;
736 for (int pass
= 0; pass
< max_iters
; pass
++)
738 bool made_changes
= false;
739 for (fold_info_map::iterator iter
= fold_info
->begin ();
740 iter
!= fold_info
->end (); ++iter
)
742 fold_mem_info
*info
= (*iter
).second
;
/* Any overlap with an invalid set makes this whole set invalid too.  */
743 if (bitmap_intersect_p (&cannot_fold_insns
, info
->fold_insns
))
744 made_changes
|= bitmap_ior_into (&cannot_fold_insns
,
755 /* If INSN is a root memory instruction that was affected by any folding
756 then update its offset as necessary. */
/* NOTE(review): extraction has dropped the return type, braces, MEM/REG
   locals, early returns and the dump_file guard around the fprintf; '®'
   below is likely a mis-encoded '&reg'.  Fragments kept verbatim.  */
758 do_commit_offset (rtx_insn
*insn
, fold_mem_info
*info
)
761 HOST_WIDE_INT cur_offset
;
762 if (!get_fold_mem_root (insn
, &mem
, ®
, &cur_offset
))
765 HOST_WIDE_INT new_offset
= cur_offset
+ info
->added_offset
;
/* Nothing to do when folding leaves the offset unchanged.  */
767 if (new_offset
== cur_offset
)
770 gcc_assert (!bitmap_empty_p (info
->fold_insns
));
/* Skip commits whose fold set intersects the invalid set.  */
772 if (bitmap_intersect_p (&cannot_fold_insns
, info
->fold_insns
))
777 fprintf (dump_file
, "Memory offset changed from "
778 HOST_WIDE_INT_PRINT_DEC
" to " HOST_WIDE_INT_PRINT_DEC
779 " for instruction:\n", cur_offset
, new_offset
);
780 print_rtl_single (dump_file
, insn
);
783 machine_mode mode
= GET_MODE (XEXP (mem
, 0));
/* Rewrite the address in place and re-recognize the modified insn.  */
785 XEXP (mem
, 0) = gen_rtx_PLUS (mode
, reg
, gen_int_mode (new_offset
, mode
));
788 INSN_CODE (insn
) = recog (PATTERN (insn
), insn
, 0);
789 df_insn_rescan (insn
);
792 /* If INSN is a move / add instruction that was folded then replace its
793 constant part with zero. */
/* NOTE(review): extraction has dropped the return type, braces, the
   dump_file guard, the mov_rtx declaration in the CONST_INT branch, the
   stats_fold_count update and the delete of the original insn.  Fragments
   kept verbatim.  */
795 do_commit_insn (rtx_insn
*insn
)
/* Only fold insns that are candidates and were not invalidated.  */
797 if (bitmap_bit_p (&candidate_fold_insns
, INSN_UID (insn
))
798 && !bitmap_bit_p (&cannot_fold_insns
, INSN_UID (insn
)))
802 fprintf (dump_file
, "Instruction folded:");
803 print_rtl_single (dump_file
, insn
);
808 rtx set
= single_set (insn
);
809 rtx dest
= SET_DEST (set
);
810 rtx src
= SET_SRC (set
);
812 /* Emit a move and let subsequent passes eliminate it if possible. */
813 if (GET_CODE (src
) == CONST_INT
)
816 Replace it with R1 = 0 because C was folded. */
818 = gen_move_insn (dest
, gen_int_mode (0, GET_MODE (dest
)));
819 df_insn_rescan (emit_insn_after (mov_rtx
, insn
));
823 /* INSN is R1 = R2 + C.
824 Replace it with R1 = R2 because C was folded. */
825 rtx arg1
= XEXP (src
, 0);
827 /* If the DEST == ARG1 then the move is a no-op. */
828 if (REGNO (dest
) != REGNO (arg1
))
830 gcc_checking_assert (GET_MODE (dest
) == GET_MODE (arg1
));
831 rtx mov_rtx
= gen_move_insn (dest
, arg1
);
832 df_insn_rescan (emit_insn_after (mov_rtx
, insn
));
836 /* Delete the original move / add instruction. */
/* Pass entry point: run the four phases (analysis, validity, commit
   offsets, commit insn deletions) on every basic block of FN.
   NOTE(review): extraction has dropped the return type, braces, the
   df_analyze call, INSN loop bodies calling do_analysis, the
   stats_fold_count argument of statistics_counter_event and the final
   return.  Fragments kept verbatim.  */
842 pass_fold_mem_offsets::execute (function
*fn
)
/* Set up the df problems/flags this pass depends on (UD/DU chains).  */
844 df_set_flags (DF_EQ_NOTES
+ DF_RD_PRUNE_DEAD_DEFS
+ DF_DEFER_INSN_RESCAN
);
845 df_chain_add_problem (DF_UD_CHAIN
+ DF_DU_CHAIN
);
848 bitmap_initialize (&can_fold_insns
, NULL
);
849 bitmap_initialize (&candidate_fold_insns
, NULL
);
850 bitmap_initialize (&cannot_fold_insns
, NULL
);
852 stats_fold_count
= 0;
856 FOR_ALL_BB_FN (bb
, fn
)
858 /* There is a conflict between this pass and RISCV's shorten-memrefs
859 pass. For now disable folding if optimizing for size because
860 otherwise this cancels the effects of shorten-memrefs. */
861 if (optimize_bb_for_size_p (bb
))
864 fold_info_map fold_info
;
/* Per-BB state: the bitmaps are reset for each basic block.  */
866 bitmap_clear (&can_fold_insns
);
867 bitmap_clear (&candidate_fold_insns
);
868 bitmap_clear (&cannot_fold_insns
);
/* Phase 1: analysis (per-insn; body dropped by extraction).  */
870 FOR_BB_INSNS (bb
, insn
)
/* Phase 2a: compute per-root folding info.  */
873 FOR_BB_INSNS (bb
, insn
)
874 do_fold_info_calculation (insn
, &fold_info
);
/* Phase 2b: validity check for each recorded root.  */
876 FOR_BB_INSNS (bb
, insn
)
877 if (fold_mem_info
**info
= fold_info
.get (insn
))
878 do_check_validity (insn
, *info
);
880 if (compute_validity_closure (&fold_info
))
/* Phase 3: commit the new memory offsets.  */
882 FOR_BB_INSNS (bb
, insn
)
883 if (fold_mem_info
**info
= fold_info
.get (insn
))
884 do_commit_offset (insn
, *info
);
/* Phase 4: simplify / delete the folded add-immediate insns.  */
886 FOR_BB_INSNS (bb
, insn
)
887 do_commit_insn (insn
);
/* Release the per-BB fold_mem_info allocations.  */
890 for (fold_info_map::iterator iter
= fold_info
.begin ();
891 iter
!= fold_info
.end (); ++iter
)
892 delete (*iter
).second
;
895 statistics_counter_event (cfun
, "Number of folded instructions",
898 bitmap_release (&can_fold_insns
);
899 bitmap_release (&candidate_fold_insns
);
900 bitmap_release (&cannot_fold_insns
);
/* Factory used by the pass manager to instantiate the pass.
   NOTE(review): return type (rtl_opt_pass *) and braces dropped by
   extraction.  */
908 make_pass_fold_mem_offsets (gcc::context
*ctxt
)
910 return new pass_fold_mem_offsets (ctxt
);