1 /* Late RTL pass to fold memory offsets.
2 Copyright (C) 2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
32 #include "insn-config.h"
36 #include "tree-pass.h"
39 /* This pass tries to optimize memory offset calculations by moving constants
40 from add instructions to the memory instructions (loads / stores).
41 For example it can transform code like this:
50 into the following (one instruction less):
58 Although the previous passes try to emit efficient offset calculations
59 this pass is still beneficial because:
61 - The mechanisms that optimize memory offsets usually work with specific
62 patterns or have limitations. This pass is designed to fold offsets
63 through complex calculations that affect multiple memory operations
64 and have partially overlapping calculations.
66 - There are cases where add instructions are introduced in late rtl passes
67 and the rest of the pipeline cannot eliminate them. Arrays and structs
68 allocated on the stack can result in unwanted add instructions that
69 cannot be eliminated easily.
71 This pass works on a basic block level and consists of 4 phases:
73 - Phase 1 (Analysis): Find "foldable" instructions.
74 Foldable instructions are those that we know how to propagate
75 a constant addition through (add, shift, move, ...) and only have other
76 foldable instructions for uses. In that phase a DFS traversal on the
77 definition tree is performed and foldable instructions are marked on
78 a bitmap. The add immediate instructions that are reachable in this
79 DFS are candidates for folding since all the intermediate calculations
80 affected by them are also foldable.
82 - Phase 2 (Validity): Traverse and calculate the offsets that would result
83 from folding the add immediate instructions. Check whether the
84 calculated offsets result in a valid instruction for the target.
86 - Phase 3 (Commit offsets): Traverse again. It is now known which folds
87 are valid so at this point change the offsets in the memory instructions.
89 - Phase 4 (Commit instruction deletions): Scan all instructions and delete
90 or simplify (reduce to move) all add immediate instructions that were
93 This pass should run before hard register propagation because it creates
94 register moves that we expect to be eliminated. */
/* NOTE(review): this chunk is a mangled extraction -- the leading numbers on
   each line are the original source line numbers and several original lines
   (braces, the RTL_PASS type/insn fields) are missing.  Comments added here
   describe intent only; do not treat this text as compilable.  */
/* Pass metadata for the fold_mem_offsets late-RTL pass.  Requests a df
   finish step (TODO_df_finish) after the pass runs.  */
98 const pass_data pass_data_fold_mem
=
101 "fold_mem_offsets", /* name */
102 OPTGROUP_NONE
, /* optinfo_flags */
104 0, /* properties_required */
105 0, /* properties_provided */
106 0, /* properties_destroyed */
107 0, /* todo_flags_start */
108 TODO_df_finish
, /* todo_flags_finish */
/* Pass wrapper class.  The pass is gated on -ffold-mem-offsets
   (flag_fold_mem_offsets) and only runs at -O2 or higher.  */
111 class pass_fold_mem_offsets
: public rtl_opt_pass
114 pass_fold_mem_offsets (gcc::context
*ctxt
)
115 : rtl_opt_pass (pass_data_fold_mem
, ctxt
)
118 /* opt_pass methods: */
119 virtual bool gate (function
*)
121 return flag_fold_mem_offsets
&& optimize
>= 2;
124 virtual unsigned int execute (function
*);
125 }; // class pass_fold_mem_offsets
127 /* Class that holds in FOLD_INSNS the instructions that if folded the offset
128 of a memory instruction would increase by ADDED_OFFSET. */
129 class fold_mem_info
{
/* Set of add-immediate instructions (by INSN_UID) whose folding into the
   memory instruction produces ADDED_OFFSET.  */
131 auto_bitmap fold_insns
;
/* Total constant offset the memory instruction would absorb.  */
132 HOST_WIDE_INT added_offset
;
/* Map from a root memory instruction to its folding information.  Entries
   are heap-allocated (see do_fold_info_calculation) and deleted at the end
   of each basic block in execute ().  */
135 typedef hash_map
<rtx_insn
*, fold_mem_info
*> fold_info_map
;
/* Per-basic-block scratch state; the bitmaps below are initialized and
   released in execute () and cleared for each basic block.  */
137 /* Tracks which instructions can be reached through instructions that can
138 propagate offsets for folding. */
139 static bitmap_head can_fold_insns
;
141 /* Marks instructions that are currently eligible for folding. */
142 static bitmap_head candidate_fold_insns
;
144 /* Tracks instructions that cannot be folded because it turned out that
145 folding will result in creating an invalid memory instruction.
146 An instruction can be in both CANDIDATE_FOLD_INSNS and CANNOT_FOLD_INSNS
147 at the same time, in which case it is not legal to fold. */
148 static bitmap_head cannot_fold_insns
;
150 /* The number of instructions that were simplified or eliminated. */
151 static int stats_fold_count
;
153 /* Get the single reaching definition of an instruction inside a BB.
154 The definition is desired for REG used in INSN.
155 Return the definition insn or NULL if there's no definition with
156 the desired criteria. */
158 get_single_def_in_bb (rtx_insn
*insn
, rtx reg
)
161 struct df_link
*ref_chain
, *ref_link
;
/* Locate the df use record of REG inside INSN; SUBREG uses are punted on
   (NOTE(review): failure paths appear to be missing from this extraction).  */
163 FOR_EACH_INSN_USE (use
, insn
)
165 if (GET_CODE (DF_REF_REG (use
)) == SUBREG
)
167 if (REGNO (DF_REF_REG (use
)) == REGNO (reg
))
174 ref_chain
= DF_REF_CHAIN (use
);
/* Walk the use-def chain and reject irregular definitions: a missing ref,
   a ref with no insn info, or a global register that the defining insn
   does not actually set.  */
179 for (ref_link
= ref_chain
; ref_link
; ref_link
= ref_link
->next
)
181 /* Problem getting some definition for this instruction. */
182 if (ref_link
->ref
== NULL
)
184 if (DF_REF_INSN_INFO (ref_link
->ref
) == NULL
)
186 if (global_regs
[REGNO (reg
)]
187 && !set_of (reg
, DF_REF_INSN (ref_link
->ref
)))
/* The surviving definition must live in the same basic block as INSN and
   precede it in luid order.  */
194 rtx_insn
*def
= DF_REF_INSN (ref_chain
->ref
);
196 if (BLOCK_FOR_INSN (def
) != BLOCK_FOR_INSN (insn
))
199 if (DF_INSN_LUID (def
) > DF_INSN_LUID (insn
))
205 /* Get all uses of REG which is set in INSN. Return the use list or NULL if a
206 use is missing / irregular. If SUCCESS is not NULL then set it to false if
207 there are missing / irregular uses and true otherwise. */
208 static struct df_link
*
209 get_uses (rtx_insn
*insn
, rtx reg
, bool *success
)
212 struct df_link
*ref_chain
, *ref_link
;
/* Find the df def record of REG produced by INSN.  */
217 FOR_EACH_INSN_DEF (def
, insn
)
218 if (REGNO (DF_REF_REG (def
)) == REGNO (reg
))
224 ref_chain
= DF_REF_CHAIN (def
);
/* Walk the def-use chain; any missing ref, non-regular ref class, or a
   use that only appears in a REG_EQUIV/REG_EQ note makes the whole use
   list unusable.  */
226 for (ref_link
= ref_chain
; ref_link
; ref_link
= ref_link
->next
)
228 /* Problem getting a use for this instruction. */
229 if (ref_link
->ref
== NULL
)
231 if (DF_REF_CLASS (ref_link
->ref
) != DF_REF_REGULAR
)
233 /* We do not handle REG_EQUIV/REG_EQ notes for now. */
234 if (DF_REF_FLAGS (ref_link
->ref
) & DF_REF_IN_NOTE
)
/* Forward declaration of fold_offsets so that fold_offsets_1 can recurse
   into it; see the definition and its full comment further below.  */
245 fold_offsets (rtx_insn
*insn
, rtx reg
, bool analyze
, bitmap foldable_insns
);
247 /* Helper function for fold_offsets.
249 If DO_RECURSION is false and ANALYZE is true this function returns true iff
250 it understands the structure of INSN and knows how to propagate constants
251 through it. In this case OFFSET_OUT and FOLDABLE_INSNS are unused.
253 If DO_RECURSION is true then it also calls fold_offsets for each recognized
254 part of INSN with the appropriate arguments.
256 If DO_RECURSION is true and ANALYZE is false then offset that would result
257 from folding is computed and is returned through the pointer OFFSET_OUT.
258 The instructions that can be folded are recorded in FOLDABLE_INSNS.
261 fold_offsets_1 (rtx_insn
*insn
, bool analyze
, bool do_recursion
,
262 HOST_WIDE_INT
*offset_out
, bitmap foldable_insns
)
264 /* Doesn't make sense if both DO_RECURSION and ANALYZE are false. */
265 gcc_checking_assert (do_recursion
|| analyze
)
;
266 gcc_checking_assert (GET_CODE (PATTERN (insn
)) == SET
)
;
268 rtx src
= SET_SRC (PATTERN (insn
));
269 HOST_WIDE_INT offset
= 0;
/* Dispatch on the form of SRC.  NOTE(review): the case labels, braces and
   early returns of this switch are missing from this extraction; each group
   below corresponds to one recognized pattern.  */
271 switch (GET_CODE (src
))
275 /* Propagate through add. */
276 rtx arg1
= XEXP (src
, 0);
277 rtx arg2
= XEXP (src
, 1);
/* Plain register first operand: recurse through its definition.  */
282 offset
+= fold_offsets (insn
, arg1
, analyze
, foldable_insns
);
284 else if (GET_CODE (arg1
) == ASHIFT
285 && REG_P (XEXP (arg1
, 0))
286 && CONST_INT_P (XEXP (arg1
, 1)))
288 /* Handle R1 = (R2 << C) + ... */
/* The propagated offset is scaled by 1 << C.  */
292 = (HOST_WIDE_INT_1U
<< INTVAL (XEXP (arg1
, 1)));
293 offset
+= scale
* fold_offsets (insn
, XEXP (arg1
, 0), analyze
,
297 else if (GET_CODE (arg1
) == PLUS
298 && REG_P (XEXP (arg1
, 0))
299 && REG_P (XEXP (arg1
, 1)))
301 /* Handle R1 = (R2 + R3) + ... */
/* Both registers may carry a foldable offset; accumulate both.  */
304 offset
+= fold_offsets (insn
, XEXP (arg1
, 0), analyze
,
306 offset
+= fold_offsets (insn
, XEXP (arg1
, 1), analyze
,
310 else if (GET_CODE (arg1
) == PLUS
311 && GET_CODE (XEXP (arg1
, 0)) == ASHIFT
312 && REG_P (XEXP (XEXP (arg1
, 0), 0))
313 && CONST_INT_P (XEXP (XEXP (arg1
, 0), 1))
314 && REG_P (XEXP (arg1
, 1)))
316 /* Handle R1 = ((R2 << C) + R3) + ... */
320 = (HOST_WIDE_INT_1U
<< INTVAL (XEXP (XEXP (arg1
, 0), 1)));
321 offset
+= scale
* fold_offsets (insn
, XEXP (XEXP (arg1
, 0), 0),
322 analyze
, foldable_insns
);
323 offset
+= fold_offsets (insn
, XEXP (arg1
, 1), analyze
,
/* Second operand of the PLUS: either a register to recurse through ...  */
333 offset
+= fold_offsets (insn
, arg2
, analyze
, foldable_insns
);
335 else if (CONST_INT_P (arg2
))
/* ... or a constant, which makes INSN itself an R1 = R2 + C fold
   candidate.  */
339 offset
+= INTVAL (arg2
);
340 /* This is a R1 = R2 + C instruction, candidate for folding. */
342 bitmap_set_bit (foldable_insns
, INSN_UID (insn
));
348 /* Pattern recognized for folding. */
353 /* Propagate through minus. */
354 rtx arg1
= XEXP (src
, 0);
355 rtx arg2
= XEXP (src
, 1);
360 offset
+= fold_offsets (insn
, arg1
, analyze
, foldable_insns
);
/* The subtrahend contributes with a negated sign.  */
368 offset
-= fold_offsets (insn
, arg2
, analyze
, foldable_insns
);
370 else if (CONST_INT_P (arg2
))
374 offset
-= INTVAL (arg2
);
375 /* This is a R1 = R2 - C instruction, candidate for folding. */
377 bitmap_set_bit (foldable_insns
, INSN_UID (insn
));
383 /* Pattern recognized for folding. */
388 /* Propagate through negation. */
389 rtx arg1
= XEXP (src
, 0);
393 offset
= -fold_offsets (insn
, arg1
, analyze
, foldable_insns
);
398 /* Pattern recognized for folding. */
403 /* Propagate through multiply by constant. */
404 rtx arg1
= XEXP (src
, 0);
405 rtx arg2
= XEXP (src
, 1);
407 if (REG_P (arg1
) && CONST_INT_P (arg2
))
411 HOST_WIDE_INT scale
= INTVAL (arg2
);
412 offset
= scale
* fold_offsets (insn
, arg1
, analyze
,
419 /* Pattern recognized for folding. */
424 /* Propagate through shift left by constant. */
425 rtx arg1
= XEXP (src
, 0);
426 rtx arg2
= XEXP (src
, 1);
428 if (REG_P (arg1
) && CONST_INT_P (arg2
))
432 HOST_WIDE_INT scale
= (HOST_WIDE_INT_1U
<< INTVAL (arg2
));
433 offset
= scale
* fold_offsets (insn
, arg1
, analyze
,
440 /* Pattern recognized for folding. */
445 /* Propagate through register move. */
447 offset
= fold_offsets (insn
, src
, analyze
, foldable_insns
);
449 /* Pattern recognized for folding. */
/* Constant load R1 = C: the whole constant is foldable.  */
454 offset
= INTVAL (src
);
455 /* R1 = C is candidate for folding. */
457 bitmap_set_bit (foldable_insns
, INSN_UID (insn
));
459 /* Pattern recognized for folding. */
463 /* Cannot recognize. */
/* Only report the computed offset when actually computing (phase 2),
   not during the pure analysis walk.  */
467 if (do_recursion
&& !analyze
)
468 *offset_out
= offset
;
473 /* Function that computes the offset that would have to be added to all uses
474 of REG if the instructions marked in FOLDABLE_INSNS were to be eliminated.
476 If ANALYZE is true then mark in CAN_FOLD_INSNS which instructions
477 transitively only affect other instructions found in CAN_FOLD_INSNS.
478 If ANALYZE is false then compute the offset required for folding. */
480 fold_offsets (rtx_insn
*insn
, rtx reg
, bool analyze
, bitmap foldable_insns
)
/* Only single-set definitions within the same BB are handled.  */
482 rtx_insn
*def
= get_single_def_in_bb (insn
, reg
);
484 if (!def
|| GET_CODE (PATTERN (def
)) != SET
)
487 rtx dest
= SET_DEST (PATTERN (def
));
492 /* We can only affect the values of GPR registers. */
493 unsigned int dest_regno
= REGNO (dest
);
494 if (fixed_regs
[dest_regno
]
495 || !TEST_HARD_REG_BIT (reg_class_contents
[GENERAL_REGS
], dest_regno
))
500 /* Check if we know how to handle DEF. */
501 if (!fold_offsets_1 (def
, true, false, NULL
, NULL
))
504 /* We only fold through instructions that are transitively used as
505 memory addresses and do not have other uses. Use the same logic
506 from offset calculation to visit instructions that can propagate
507 offsets and keep track of them in CAN_FOLD_INSNS. */
509 struct df_link
*uses
= get_uses (def
, dest
, &success
), *ref_link
;
/* Every non-debug use of DEF must itself be a plain SET already known to
   be foldable, otherwise DEF cannot be marked as propagating.  */
514 for (ref_link
= uses
; ref_link
; ref_link
= ref_link
->next
)
516 rtx_insn
*use
= DF_REF_INSN (ref_link
->ref
);
518 if (DEBUG_INSN_P (use
))
521 /* Punt if the use is anything more complicated than a set
522 (clobber, use, etc). */
523 if (!NONJUMP_INSN_P (use
) || GET_CODE (PATTERN (use
)) != SET
)
526 /* This use affects instructions outside of CAN_FOLD_INSNS. */
527 if (!bitmap_bit_p (&can_fold_insns
, INSN_UID (use
)))
530 rtx use_set
= PATTERN (use
);
532 /* Special case: A foldable memory store is not foldable if it
533 mentions DEST outside of the address calculation. */
534 if (use_set
&& MEM_P (SET_DEST (use_set
))
535 && reg_mentioned_p (dest
, SET_SRC (use_set
)))
539 bitmap_set_bit (&can_fold_insns
, INSN_UID (def
));
541 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
543 fprintf (dump_file
, "Instruction marked for propagation: ");
544 print_rtl_single (dump_file
, def
);
549 /* We cannot propagate through this instruction. */
550 if (!bitmap_bit_p (&can_fold_insns
, INSN_UID (def
)))
/* Phase 2: recurse through DEF and compute the actual offset.  */
554 HOST_WIDE_INT offset
= 0;
555 bool recognized
= fold_offsets_1 (def
, analyze
, true, &offset
,
564 /* Test if INSN is a memory load / store that can have an offset folded to it.
565 Return true iff INSN is such an instruction and return through MEM_OUT,
566 REG_OUT and OFFSET_OUT the RTX that has a MEM code, the register that is
567 used as a base address and the offset accordingly.
568 All of the out pointers may be NULL in which case they will be ignored. */
570 get_fold_mem_root (rtx_insn
*insn
, rtx
*mem_out
, rtx
*reg_out
,
571 HOST_WIDE_INT
*offset_out
)
573 rtx set
= single_set (insn
);
578 rtx src
= SET_SRC (set
);
579 rtx dest
= SET_DEST (set
);
581 /* Don't fold when we have unspec / volatile. */
582 if (GET_CODE (src
) == UNSPEC
583 || GET_CODE (src
) == UNSPEC_VOLATILE
584 || GET_CODE (dest
) == UNSPEC
585 || GET_CODE (dest
) == UNSPEC_VOLATILE
)
/* Recognize a load (MEM as source -- NOTE(review): that arm is missing
   from this extraction), a store, or a sign/zero-extending load.  */
590 else if (MEM_P (dest
))
592 else if ((GET_CODE (src
) == SIGN_EXTEND
593 || GET_CODE (src
) == ZERO_EXTEND
)
594 && MEM_P (XEXP (src
, 0)))
/* Decompose the address into base register plus constant offset.  */
601 rtx mem_addr
= XEXP (mem
, 0);
603 HOST_WIDE_INT offset
;
605 if (REG_P (mem_addr
))
610 else if (GET_CODE (mem_addr
) == PLUS
611 && REG_P (XEXP (mem_addr
, 0))
612 && CONST_INT_P (XEXP (mem_addr
, 1)))
614 reg
= XEXP (mem_addr
, 0);
615 offset
= INTVAL (XEXP (mem_addr
, 1));
625 *offset_out
= offset
;
630 /* If INSN is a root memory instruction then do a DFS traversal on its
631 definitions and find folding candidates. */
633 do_analysis (rtx_insn
*insn
)
/* Only the base register matters here; mem and offset outs are ignored.  */
636 if (!get_fold_mem_root (insn
, NULL
, &reg
, NULL
))
639 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
641 fprintf (dump_file
, "Starting analysis from root: ");
642 print_rtl_single (dump_file
, insn
);
645 /* Analyse folding opportunities for this memory instruction. */
/* Seed CAN_FOLD_INSNS with the root itself so its address definitions
   pass the "all uses are foldable" check in fold_offsets.  */
646 bitmap_set_bit (&can_fold_insns
, INSN_UID (insn
));
647 fold_offsets (insn
, reg
, true, NULL
);
/* If INSN is a root memory instruction, compute its fold_mem_info (offset
   delta plus the set of foldable insns) and record it in FOLD_INFO.
   Ownership note: the fold_mem_info is heap-allocated here and is expected
   to be deleted by the caller that owns FOLD_INFO (see execute ()).  */
651 do_fold_info_calculation (rtx_insn
*insn
, fold_info_map
*fold_info
)
654 HOST_WIDE_INT cur_offset
;
655 if (!get_fold_mem_root (insn
, &mem
, &reg
, &cur_offset
))
658 fold_mem_info
*info
= new fold_mem_info
;
659 info
->added_offset
= fold_offsets (insn
, reg
, false, info
->fold_insns
);
661 fold_info
->put (insn
, info
);
664 /* If INSN is a root memory instruction then compute a potentially new offset
665 for it and test if the resulting instruction is valid. */
667 do_check_validity (rtx_insn
*insn
, fold_mem_info
*info
)
670 HOST_WIDE_INT cur_offset
;
671 if (!get_fold_mem_root (insn
, &mem
, &reg
, &cur_offset
))
674 HOST_WIDE_INT new_offset
= cur_offset
+ info
->added_offset
;
676 /* Test if it is valid to change MEM's address offset to NEW_OFFSET. */
/* Temporarily invalidate INSN_CODE so insn_invalid_p re-recognizes the
   modified pattern instead of trusting the cached code.  */
677 int icode
= INSN_CODE (insn
);
678 INSN_CODE (insn
) = -1;
679 rtx mem_addr
= XEXP (mem
, 0);
680 machine_mode mode
= GET_MODE (mem_addr
);
/* Trial substitution: base + new_offset.  Undone below regardless of the
   outcome; the real rewrite happens in do_commit_offset.  */
682 XEXP (mem
, 0) = gen_rtx_PLUS (mode
, reg
, gen_int_mode (new_offset
, mode
));
686 bool illegal
= insn_invalid_p (insn
, false)
687 || !memory_address_addr_space_p (mode
, XEXP (mem
, 0),
688 MEM_ADDR_SPACE (mem
));
690 /* Restore the instruction. */
691 XEXP (mem
, 0) = mem_addr
;
692 INSN_CODE (insn
) = icode
;
/* Record the verdict: an illegal fold poisons all contributing insns,
   a legal one marks them as candidates.  */
695 bitmap_ior_into (&cannot_fold_insns
, info
->fold_insns
);
697 bitmap_ior_into (&candidate_fold_insns
, info
->fold_insns
);
/* Propagate fold invalidity transitively across memory roots that share
   foldable add instructions, iterating to a fixed point (bounded).  */
701 compute_validity_closure (fold_info_map
*fold_info
)
703 /* Let's say we have an arbitrary chain of foldable instructions xN = xN + C
704 and memory operations rN that use xN as shown below. If folding x1 in r1
705 turns out to be invalid for whatever reason then it's also invalid to fold
706 any of the other xN into any rN. That means that we need the transitive
707 closure of validity to determine whether we can fold a xN instruction.
709 +--------------+ +-------------------+ +-------------------+
710 | r1 = mem[x1] | | r2 = mem[x1 + x2] | | r3 = mem[x2 + x3] | ...
711 +--------------+ +-------------------+ +-------------------+
715 +-------------+ / +-------------+ / +-------------+
716 | x1 = x1 + 1 |-----+ | x2 = x2 + 1 |-----+ | x3 = x3 + 1 |--- ...
717 +-------------+ +-------------+ +-------------+
723 /* In general three iterations should be enough for most cases, but allow up
724 to five when -fexpensive-optimizations is used. */
725 int max_iters
= 3 + 2 * flag_expensive_optimizations
;
726 for (int pass
= 0; pass
< max_iters
; pass
++)
728 bool made_changes
= false;
729 for (fold_info_map::iterator iter
= fold_info
->begin ();
730 iter
!= fold_info
->end (); ++iter
)
732 fold_mem_info
*info
= (*iter
).second
;
/* If any contributing insn is already poisoned, poison them all.  */
733 if (bitmap_intersect_p (&cannot_fold_insns
, info
->fold_insns
))
734 made_changes
|= bitmap_ior_into (&cannot_fold_insns
,
745 /* If INSN is a root memory instruction that was affected by any folding
746 then update its offset as necessary. */
748 do_commit_offset (rtx_insn
*insn
, fold_mem_info
*info
)
751 HOST_WIDE_INT cur_offset
;
752 if (!get_fold_mem_root (insn
, &mem
, &reg
, &cur_offset
))
755 HOST_WIDE_INT new_offset
= cur_offset
+ info
->added_offset
;
/* Nothing to do if folding does not change the offset.  */
757 if (new_offset
== cur_offset
)
/* A nonzero delta implies at least one contributing fold insn.  */
760 gcc_assert (!bitmap_empty_p (info
->fold_insns
));
/* Skip roots whose fold set was poisoned by the validity closure.  */
762 if (bitmap_intersect_p (&cannot_fold_insns
, info
->fold_insns
))
767 fprintf (dump_file
, "Memory offset changed from "
768 HOST_WIDE_INT_PRINT_DEC
" to " HOST_WIDE_INT_PRINT_DEC
769 " for instruction:\n", cur_offset
, new_offset
);
770 print_rtl_single (dump_file
, insn
);
773 machine_mode mode
= GET_MODE (XEXP (mem
, 0));
/* Rewrite the address in place and re-recognize / rescan the insn; this
   is safe because do_check_validity already vetted the new offset.  */
775 XEXP (mem
, 0) = gen_rtx_PLUS (mode
, reg
, gen_int_mode (new_offset
, mode
));
778 INSN_CODE (insn
) = recog (PATTERN (insn
), insn
, 0);
779 df_insn_rescan (insn
);
782 /* If INSN is a move / add instruction that was folded then replace its
783 constant part with zero. */
785 do_commit_insn (rtx_insn
*insn
)
/* Only touch insns that are candidates AND were not poisoned.  */
787 if (bitmap_bit_p (&candidate_fold_insns
, INSN_UID (insn
))
788 && !bitmap_bit_p (&cannot_fold_insns
, INSN_UID (insn
)))
792 fprintf (dump_file
, "Instruction folded:");
793 print_rtl_single (dump_file
, insn
);
798 rtx set
= single_set (insn
);
799 rtx dest
= SET_DEST (set
);
800 rtx src
= SET_SRC (set
);
802 /* Emit a move and let subsequent passes eliminate it if possible. */
803 if (GET_CODE (src
) == CONST_INT
)
/* INSN was R1 = C (NOTE(review): that leading comment line is missing
   from this extraction).  */
806 Replace it with R1 = 0 because C was folded. */
808 = gen_move_insn (dest
, gen_int_mode (0, GET_MODE (dest
)));
809 df_insn_rescan (emit_insn_after (mov_rtx
, insn
));
813 /* INSN is R1 = R2 + C.
814 Replace it with R1 = R2 because C was folded. */
815 rtx arg1
= XEXP (src
, 0);
817 /* If the DEST == ARG1 then the move is a no-op. */
818 if (REGNO (dest
) != REGNO (arg1
))
820 gcc_checking_assert (GET_MODE (dest
) == GET_MODE (arg1
));
821 rtx mov_rtx
= gen_move_insn (dest
, arg1
);
822 df_insn_rescan (emit_insn_after (mov_rtx
, insn
));
826 /* Delete the original move / add instruction. */
/* Main driver: set up df with UD/DU chains, then run the four phases
   (analysis, offset calculation, validity check + closure, commit) on each
   basic block independently.  */
832 pass_fold_mem_offsets::execute (function
*fn
)
834 df_set_flags (DF_EQ_NOTES
+ DF_RD_PRUNE_DEAD_DEFS
+ DF_DEFER_INSN_RESCAN
);
835 df_chain_add_problem (DF_UD_CHAIN
+ DF_DU_CHAIN
);
838 bitmap_initialize (&can_fold_insns
, NULL
);
839 bitmap_initialize (&candidate_fold_insns
, NULL
);
840 bitmap_initialize (&cannot_fold_insns
, NULL
);
842 stats_fold_count
= 0;
846 FOR_ALL_BB_FN (bb
, fn
)
848 /* There is a conflict between this pass and RISCV's shorten-memrefs
849 pass. For now disable folding if optimizing for size because
850 otherwise this cancels the effects of shorten-memrefs. */
851 if (optimize_bb_for_size_p (bb
))
854 fold_info_map fold_info
;
/* State is per-BB: reset the bitmaps before each block.  */
856 bitmap_clear (&can_fold_insns
);
857 bitmap_clear (&candidate_fold_insns
);
858 bitmap_clear (&cannot_fold_insns
);
/* Phase 1: mark foldable instructions (NOTE(review): the do_analysis
   call body of this loop is missing from this extraction).  */
860 FOR_BB_INSNS (bb
, insn
)
/* Phase 2: compute would-be offsets per memory root.  */
863 FOR_BB_INSNS (bb
, insn
)
864 do_fold_info_calculation (insn
, &fold_info
);
/* Phase 3a: validity check for each root's new offset.  */
866 FOR_BB_INSNS (bb
, insn
)
867 if (fold_mem_info
**info
= fold_info
.get (insn
))
868 do_check_validity (insn
, *info
);
870 if (compute_validity_closure (&fold_info
))
/* Phase 3b: commit the new offsets into memory insns.  */
872 FOR_BB_INSNS (bb
, insn
)
873 if (fold_mem_info
**info
= fold_info
.get (insn
))
874 do_commit_offset (insn
, *info
);
/* Phase 4: simplify / delete the folded add instructions.  */
876 FOR_BB_INSNS (bb
, insn
)
877 do_commit_insn (insn
);
/* Free the per-BB fold_mem_info allocations.  */
880 for (fold_info_map::iterator iter
= fold_info
.begin ();
881 iter
!= fold_info
.end (); ++iter
)
882 delete (*iter
).second
;
885 statistics_counter_event (cfun
, "Number of folded instructions",
888 bitmap_release (&can_fold_insns
);
889 bitmap_release (&candidate_fold_insns
);
890 bitmap_release (&cannot_fold_insns
);
/* Factory entry point used by the pass manager to instantiate the pass.  */
898 make_pass_fold_mem_offsets (gcc::context
*ctxt
)
900 return new pass_fold_mem_offsets (ctxt
);