/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
   - ability to realloc sbitmap vectors would allow one initial computation
     of reg_set_in_block with only subsequent additions, rather than
     recomputing it for each pass
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   F. Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"

#define obstack_chunk_alloc gmalloc
#define obstack_chunk_free free
/* Propagate flow information through back edges and thus enable PRE's
   moving loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */
#define FOLLOW_BACK_EDGES 1
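/* Illustrative sketch only (not part of the pass; the function and
   variable names below are made up): the kind of loop-invariant
   computation that PRE can move once back edges are followed.  The
   multiply is invariant in the loop, so treating it as partially
   redundant lets it be computed once before the loop.  */
#if 0
static void
invariant_before (int *v, int n, int a, int b)
{
  int i;
  for (i = 0; i < n; i++)
    v[i] = a * b;		/* a * b recomputed on every iteration */
}

static void
invariant_after (int *v, int n, int a, int b)
{
  int i;
  int t = a * b;		/* computed once, before the loop */
  for (i = 0; i < n; i++)
    v[i] = t;
}
#endif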
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute basic block information.

   2) Compute table of places where registers are set.

   3) Perform copy/constant propagation.

   4) Perform global cse.

   5) Perform another pass of copy/constant propagation.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.  The parameter
   max-gcse-passes can be modified if one wants to experiment.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing register.

   **********************

   A fair bit of simplicity is created by creating small functions for simple
   tasks, even when the function is only called in one place.  This may
   measurably slow things down [or may not] by creating more function call
   overhead than is necessary.  The source is laid out so that it's trivial
   to make the affected functions inline so that one can measure what speed
   up, if any, can be achieved, and maybe later when things settle things can
   be rearranged.

   Help stamp out big monolithic functions!  */
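/* Illustrative sketch only (names are made up): the source-level effect of
   eliminating a fully redundant expression.  The second evaluation of
   a + b is replaced by a copy from a register holding the first result;
   the following copy propagation pass then cleans up the extra copies.  */
#if 0
static int
redundant_before (int a, int b)
{
  int x = a + b;
  int y = a + b;		/* fully redundant with the line above */
  return x * y;
}

static int
redundant_after (int a, int b)
{
  int t = a + b;		/* value kept in a new pseudo */
  int x = t;
  int y = t;
  return x * y;
}
#endif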
/* GCSE global vars.  */

static FILE *gcse_file;

/* Note whether or not we should run jump optimization after gcse.  We
   want to do this for two cases.

    * If we changed any jumps via cprop.

    * If we added any labels via edge splitting.  */

static int run_jump_opt_after_gcse;

/* Bitmaps are normally not included in debugging dumps.
   However it's useful to be able to print them from GDB.
   We could create special functions for this, but it's simpler to
   just allow passing stderr to the dump_foo fns.  Since stderr can
   be a macro, we store a copy here.  */
static FILE *debug_stderr;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Non-zero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy_p[(int) NUM_MACHINE_MODES];

/* Non-zero if can_copy_p has been initialized.  */
static int can_copy_init_p;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrence in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Non-zero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Non-zero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

/* Total size of the expression hash table, in elements.  */
static unsigned int expr_hash_table_size;

/* The table itself.
   This is an array of `expr_hash_table_size' elements.  */
static struct expr **expr_hash_table;

/* Total size of the copy propagation hash table, in elements.  */
static unsigned int set_hash_table_size;

/* The table itself.
   This is an array of `set_hash_table_size' elements.  */
static struct expr **set_hash_table;
/* Mapping of uids to cuids.
   Only real insns get cuids.  */
static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */
#ifdef ENABLE_CHECKING
#define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif
/* Number of cuids.  */
static int max_cuid;

/* Mapping of cuids to insns.  */
static rtx *cuid_insn;

/* Get insn from cuid.  */
#define CUID_INSN(CUID) (cuid_insn[CUID])
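/* Minimal usage sketch, assuming the mappings built by alloc_gcse_mem
   below: for a real insn, uid -> cuid -> insn is a round trip.  The
   function name is made up for illustration.  */
#if 0
static void
check_cuid_mapping (insn)
     rtx insn;
{
  if (INSN_P (insn) && CUID_INSN (INSN_CUID (insn)) != insn)
    abort ();
}
#endif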
/* Maximum register number in function prior to doing gcse + 1.
   Registers created during this pass have regno >= max_gcse_regno.
   This is named with "gcse" to not collide with global of same name.  */
static unsigned int max_gcse_regno;

/* Maximum number of cse-able expressions found.  */
static int n_exprs;

/* Maximum number of assignments for copy propagation found.  */
static int n_sets;
/* Table of registers that are modified.

   For each register, each element is a list of places where the pseudo-reg
   is set.

   For simplicity, GCSE is done on sets of pseudo-regs only.  PRE GCSE only
   requires knowledge of which blocks kill which regs [and thus could use
   a bitmap instead of the lists `reg_set_table' uses].

   `reg_set_table' could be turned into an array of bitmaps (num-bbs x
   num-regs) [however perhaps it may be useful to keep the data as is].  One
   advantage of recording things this way is that `reg_set_table' is fairly
   sparse with respect to pseudo regs but for hard regs could be fairly dense
   [relatively speaking].  And recording sets of pseudo-regs in lists speeds
   up functions like compute_transp since in the case of pseudo-regs we only
   need to iterate over the number of times a pseudo-reg is set, not over the
   number of basic blocks [clearly there is a bit of a slow down in the cases
   where a pseudo is set more than once in a block, however it is believed
   that the net effect is to speed things up].  This isn't done for hard-regs
   because recording call-clobbered hard-regs in `reg_set_table' at each
   function call can consume a fair bit of memory, and iterating over
   hard-regs stored this way in compute_transp will be more expensive.  */

typedef struct reg_set
{
  /* The next setting of this register.  */
  struct reg_set *next;
  /* The insn where it was set.  */
  rtx insn;
} reg_set;

static reg_set **reg_set_table;

/* Size of `reg_set_table'.
   The table starts out at max_gcse_regno + slop, and is enlarged as
   necessary.  */
static int reg_set_table_size;

/* Amount to grow `reg_set_table' by when it's full.  */
#define REG_SET_TABLE_SLOP 100
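/* Minimal sketch of how the per-register lists are intended to be walked
   (this is roughly what compute_transp does for pseudo-regs); the function
   name is made up for illustration.  */
#if 0
static void
visit_sets_of_reg (regno)
     unsigned int regno;
{
  struct reg_set *r;

  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    {
      /* r->insn is one insn that sets pseudo REGNO; its block can be
	 found with BLOCK_NUM (r->insn).  */
    }
}
#endif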
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself. (ie, loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */

struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  int hash_index;		/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;
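/* Minimal sketch of walking the load/store list rooted at pre_ldst_mems
   (the helpers first_ls_expr/next_ls_expr declared below provide the same
   traversal); the function name is made up for illustration.  */
#if 0
static void
walk_ldst_list ()
{
  struct ls_expr *ptr;

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      /* ptr->pattern is the MEM being tracked; ptr->loads and ptr->stores
	 are INSN lists of the loads and stores seen for it.  */
    }
}
#endif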
/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* For each block, a bitmap of registers set in the block.
   This is used by expr_killed_p and compute_transp.
   It is computed during hash table computation and not by compute_sets
   as it includes registers added since the last pass (or between cprop and
   gcse) and it's currently not easy to realloc sbitmap vectors.  */
static sbitmap *reg_set_in_block;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;
bitmap canon_modify_mem_list_set;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of constants propagated.  */
static int const_prop_count;
/* Number of copies propagated.  */
static int copy_prop_count;

/* These variables are used by classic GCSE.
   Normally they'd be defined a bit later, but `rd_gen' needs to
   be declared sooner.  */

/* Each block has a bitmap of each type.
   The length of each block's bitmap is:

       max_cuid  - for reaching definitions
       n_exprs   - for available expressions

   Thus we view the bitmaps as 2 dimensional arrays.  i.e.
   rd_kill[block_num][cuid_num]
   ae_kill[block_num][expr_num]  */

/* For reaching defs */
static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;

/* for available exprs */
static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
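/* Sketch of the two-dimensional view described above; a hypothetical query,
   assuming the ae_kill vector has already been computed.  */
#if 0
static int
expr_killed_in_block_p (block_num, expr_num)
     int block_num;
     int expr_num;
{
  return TEST_BIT (ae_kill[block_num], expr_num) != 0;
}
#endif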
/* Objects of this type are passed around by the null-pointer check
   removal routines.  */
struct null_pointer_info
{
  /* The basic block being processed.  */
  basic_block current_block;
  /* The first register to be handled in this pass.  */
  unsigned int min_reg;
  /* One greater than the last register to be handled in this pass.  */
  unsigned int max_reg;
  sbitmap *nonnull_local;
  sbitmap *nonnull_killed;
};
static void compute_can_copy	PARAMS ((void));
static char *gmalloc		PARAMS ((unsigned int));
static char *grealloc		PARAMS ((char *, unsigned int));
static char *gcse_alloc		PARAMS ((unsigned long));
static void alloc_gcse_mem	PARAMS ((rtx));
static void free_gcse_mem	PARAMS ((void));
static void alloc_reg_set_mem	PARAMS ((int));
static void free_reg_set_mem	PARAMS ((void));
static int get_bitmap_width	PARAMS ((int, int, int));
static void record_one_set	PARAMS ((int, rtx));
static void record_set_info	PARAMS ((rtx, rtx, void *));
static void compute_sets	PARAMS ((rtx));
static void hash_scan_insn	PARAMS ((rtx, int, int));
static void hash_scan_set	PARAMS ((rtx, rtx, int));
static void hash_scan_clobber	PARAMS ((rtx, rtx));
static void hash_scan_call	PARAMS ((rtx, rtx));
static int want_to_gcse_p	PARAMS ((rtx));
static int oprs_unchanged_p	PARAMS ((rtx, rtx, int));
static int oprs_anticipatable_p PARAMS ((rtx, rtx));
static int oprs_available_p	PARAMS ((rtx, rtx));
static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
					  int, int));
static void insert_set_in_table PARAMS ((rtx, rtx));
static unsigned int hash_expr	PARAMS ((rtx, enum machine_mode, int *, int));
static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
static unsigned int hash_string_1 PARAMS ((const char *));
static unsigned int hash_set	PARAMS ((int, int));
static int expr_equiv_p		PARAMS ((rtx, rtx));
static void record_last_reg_set_info PARAMS ((rtx, int));
static void record_last_mem_set_info PARAMS ((rtx));
static void record_last_set_info PARAMS ((rtx, rtx, void *));
static void compute_hash_table	PARAMS ((int));
static void alloc_set_hash_table PARAMS ((int));
static void free_set_hash_table PARAMS ((void));
static void compute_set_hash_table PARAMS ((void));
static void alloc_expr_hash_table PARAMS ((unsigned int));
static void free_expr_hash_table PARAMS ((void));
static void compute_expr_hash_table PARAMS ((void));
static void dump_hash_table	PARAMS ((FILE *, const char *, struct expr **,
					 int, int));
static struct expr *lookup_expr	PARAMS ((rtx));
static struct expr *lookup_set	PARAMS ((unsigned int, rtx));
static struct expr *next_set	PARAMS ((unsigned int, struct expr *));
static void reset_opr_set_tables PARAMS ((void));
static int oprs_not_set_p	PARAMS ((rtx, rtx));
static void mark_call		PARAMS ((rtx));
static void mark_set		PARAMS ((rtx, rtx));
static void mark_clobber	PARAMS ((rtx, rtx));
static void mark_oprs_set	PARAMS ((rtx));
static void alloc_cprop_mem	PARAMS ((int, int));
static void free_cprop_mem	PARAMS ((void));
static void compute_transp	PARAMS ((rtx, int, sbitmap *, int));
static void compute_transpout	PARAMS ((void));
static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
					      int));
static void compute_cprop_data	PARAMS ((void));
static void find_used_regs	PARAMS ((rtx *, void *));
static int try_replace_reg	PARAMS ((rtx, rtx, rtx));
static struct expr *find_avail_set PARAMS ((int, rtx));
static int cprop_jump		PARAMS ((basic_block, rtx, rtx, rtx));
#ifdef HAVE_cc0
static int cprop_cc0_jump	PARAMS ((basic_block, rtx, struct reg_use *, rtx));
#endif
static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
static void canon_list_insert	PARAMS ((rtx, rtx, void *));
static int cprop_insn		PARAMS ((basic_block, rtx, int));
static int cprop		PARAMS ((int));
static int one_cprop_pass	PARAMS ((int, int));
static void alloc_pre_mem	PARAMS ((int, int));
static void free_pre_mem	PARAMS ((void));
static void compute_pre_data	PARAMS ((void));
static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
					    basic_block));
static void insert_insn_end_bb	PARAMS ((struct expr *, basic_block, int));
static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
static void pre_insert_copies	PARAMS ((void));
static int pre_delete		PARAMS ((void));
static int pre_gcse		PARAMS ((void));
static int one_pre_gcse_pass	PARAMS ((int));
static void add_label_notes	PARAMS ((rtx, rtx));
static void alloc_code_hoist_mem PARAMS ((int, int));
static void free_code_hoist_mem	PARAMS ((void));
static void compute_code_hoist_vbeinout PARAMS ((void));
static void compute_code_hoist_data PARAMS ((void));
static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
					      char *));
static void hoist_code		PARAMS ((void));
static int one_code_hoisting_pass PARAMS ((void));
static void alloc_rd_mem	PARAMS ((int, int));
static void free_rd_mem		PARAMS ((void));
static void handle_rd_kill_set	PARAMS ((rtx, int, basic_block));
static void compute_kill_rd	PARAMS ((void));
static void compute_rd		PARAMS ((void));
static void alloc_avail_expr_mem PARAMS ((int, int));
static void free_avail_expr_mem	PARAMS ((void));
static void compute_ae_gen	PARAMS ((void));
static int expr_killed_p	PARAMS ((rtx, basic_block));
static void compute_ae_kill	PARAMS ((sbitmap *, sbitmap *));
static int expr_reaches_here_p	PARAMS ((struct occr *, struct expr *,
					 basic_block, int));
static rtx computing_insn	PARAMS ((struct expr *, rtx));
static int def_reaches_here_p	PARAMS ((rtx, rtx));
static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
static int handle_avail_expr	PARAMS ((rtx, struct expr *));
static int classic_gcse		PARAMS ((void));
static int one_classic_gcse_pass PARAMS ((int));
static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
static void delete_null_pointer_checks_1 PARAMS ((unsigned int *,
						  sbitmap *, sbitmap *,
						  struct null_pointer_info *));
static rtx process_insert_insn	PARAMS ((struct expr *));
static int pre_edge_insert	PARAMS ((struct edge_list *, struct expr **));
static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
					     basic_block, int, char *));
static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
						 basic_block, char *));
static struct ls_expr * ldst_entry	PARAMS ((rtx));
static void free_ldst_entry		PARAMS ((struct ls_expr *));
static void free_ldst_mems		PARAMS ((void));
static void print_ldst_list		PARAMS ((FILE *));
static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
static int enumerate_ldsts		PARAMS ((void));
static inline struct ls_expr * first_ls_expr PARAMS ((void));
static inline struct ls_expr * next_ls_expr  PARAMS ((struct ls_expr *));
static int simple_mem			PARAMS ((rtx));
static void invalidate_any_buried_refs	PARAMS ((rtx));
static void compute_ld_motion_mems	PARAMS ((void));
static void trim_ld_motion_mems		PARAMS ((void));
static void update_ld_motion_stores	PARAMS ((struct expr *));
static void reg_set_info		PARAMS ((rtx, rtx, void *));
static int store_ops_ok			PARAMS ((rtx, basic_block));
static void find_moveable_store		PARAMS ((rtx));
static int compute_store_table		PARAMS ((void));
static int load_kills_store		PARAMS ((rtx, rtx));
static int find_loads			PARAMS ((rtx, rtx));
static int store_killed_in_insn	PARAMS ((rtx, rtx));
static int store_killed_after		PARAMS ((rtx, rtx, basic_block));
static int store_killed_before		PARAMS ((rtx, rtx, basic_block));
static void build_store_vectors		PARAMS ((void));
static void insert_insn_start_bb	PARAMS ((rtx, basic_block));
static int insert_store			PARAMS ((struct ls_expr *, edge));
static void replace_store_insn		PARAMS ((rtx, rtx, basic_block));
static void delete_store		PARAMS ((struct ls_expr *,
						 basic_block));
static void free_store_memory		PARAMS ((void));
static void store_motion		PARAMS ((void));
static void clear_modify_mem_tables	PARAMS ((void));
static void free_modify_mem_tables	PARAMS ((void));
/* Entry point for global common subexpression elimination.
   F is the first instruction in the function.  */

  /* Bytes used at start of pass.  */
  int initial_bytes_used;
  /* Maximum number of bytes used by a pass.  */
  /* Point to release obstack data from for each pass.  */
  char *gcse_obstack_bottom;

  /* Insertion of instructions on edges can create new basic blocks; we
     need the original basic block count so that we can properly deallocate
     arrays sized on the number of basic blocks originally in the cfg.  */

  /* We do not construct an accurate cfg in functions which call
     setjmp, so just punt to be safe.  */
  if (current_function_calls_setjmp)
    return 0;

  /* Assume that we do not need to run jump optimizations after gcse.  */
  run_jump_opt_after_gcse = 0;

  /* For calling dump_foo fns from gdb.  */
  debug_stderr = stderr;

  /* Identify the basic block information for this function, including
     successors and predecessors.  */
  max_gcse_regno = max_reg_num ();

  if (file)
    dump_flow_info (file);

  orig_bb_count = n_basic_blocks;
  /* Return if there's nothing to do.  */
  if (n_basic_blocks <= 1)
    return 0;

  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many edges
     as blocks.  But we do not want to punish small functions which have
     a couple switch statements.  So we require a relatively large number
     of basic blocks and the ratio of edges to blocks to be high.  */
  if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
    {
      if (warn_disabled_optimization)
	warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
		 n_basic_blocks, n_edges / n_basic_blocks);
      return 0;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_gcse_regno)
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      if (warn_disabled_optimization)
	warning ("GCSE disabled: %d basic blocks and %d registers",
		 n_basic_blocks, max_gcse_regno);
      return 0;
    }

  /* See what modes support reg/reg copy operations.  */
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = 1;
    }

  gcc_obstack_init (&gcse_obstack);

  init_alias_analysis ();

  /* Record where pseudo-registers are set.  This data is kept accurate
     during each pass.  ??? We could also record hard-reg information here
     [since it's unchanging], however it is currently done during hash table
     computation.

     It may be tempting to compute MEM set information here too, but MEM sets
     will be subject to code motion one day and thus we need to compute
     information about memory sets when we build the hash tables.  */

  alloc_reg_set_mem (max_gcse_regno);

  initial_bytes_used = bytes_used;

  gcse_obstack_bottom = gcse_alloc (1);

  while (changed && pass < MAX_GCSE_PASSES)
    {
      if (file)
	fprintf (file, "GCSE pass %d\n\n", pass + 1);

      /* Initialize bytes_used to the space for the pred/succ lists,
	 and the reg_set_table data.  */
      bytes_used = initial_bytes_used;

      /* Each pass may create new registers, so recalculate each time.  */
      max_gcse_regno = max_reg_num ();

      /* Don't allow constant propagation to modify jumps
	 during this pass.  */
      changed = one_cprop_pass (pass + 1, 0);

      changed |= one_classic_gcse_pass (pass + 1);

      changed |= one_pre_gcse_pass (pass + 1);
      /* We may have just created new basic blocks.  Release and
	 recompute various things which are sized on the number of
	 basic blocks.  */
      free_modify_mem_tables ();
      modify_mem_list
	= (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
      canon_modify_mem_list
	= (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
      memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
      memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
      orig_bb_count = n_basic_blocks;

      alloc_reg_set_mem (max_reg_num ());

      run_jump_opt_after_gcse = 1;

      if (max_pass_bytes < bytes_used)
	max_pass_bytes = bytes_used;

      /* Free up memory, then reallocate for code hoisting.  We can
	 not re-use the existing allocated memory because the tables
	 will not have info for the insns or registers created by
	 partial redundancy elimination.  */

      /* It does not make sense to run code hoisting unless we are optimizing
	 for code size -- it rarely makes programs faster, and can make
	 them bigger if we did partial redundancy elimination (when optimizing
	 for space, we use a classic gcse algorithm instead of partial
	 redundancy algorithms).  */
      max_gcse_regno = max_reg_num ();

      changed |= one_code_hoisting_pass ();

      if (max_pass_bytes < bytes_used)
	max_pass_bytes = bytes_used;

      if (file)
	fprintf (file, "\n");

      obstack_free (&gcse_obstack, gcse_obstack_bottom);
    }

  /* Do one last pass of copy propagation, including cprop into
     conditional jumps.  */

  max_gcse_regno = max_reg_num ();

  /* This time, go ahead and allow cprop to alter jumps.  */
  one_cprop_pass (pass + 1, 1);

  if (file)
    {
      fprintf (file, "GCSE of %s: %d basic blocks, ",
	       current_function_name, n_basic_blocks);
      fprintf (file, "%d pass%s, %d bytes\n\n",
	       pass, pass > 1 ? "es" : "", max_pass_bytes);
    }

  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();
  allocate_reg_info (max_reg_num (), FALSE, FALSE);

  if (!optimize_size && flag_gcse_sm)
    store_motion ();

  /* Record where pseudo-registers are set.  */
  return run_jump_opt_after_gcse;
/* Misc. utilities.  */

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy ()
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy_p, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy_p[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy_p[i] = 1;
#endif
      }
    else
      can_copy_p[i] = 1;

  end_sequence ();
}

/* Cover function to xmalloc to record bytes allocated.  */

static char *
gmalloc (size)
     unsigned int size;
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xrealloc.
   We don't record the additional size since we don't know it.
   It won't affect memory usage stats much anyway.  */

static char *
grealloc (ptr, size)
     char *ptr;
     unsigned int size;
{
  return xrealloc (ptr, size);
}

/* Cover function to obstack_alloc.
   We don't need to record the bytes allocated here since
   obstack_chunk_alloc is set to gmalloc.  */

static char *
gcse_alloc (size)
     unsigned long size;
{
  return (char *) obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the cuid mapping array,
   and reg/memory set tracking tables.

   This is called at the start of each pass.  */

static void
alloc_gcse_mem (f)
     rtx f;
{
  int i, n;
  rtx insn;

  /* Find the largest UID and create a mapping from UIDs to CUIDs.
     CUIDs are like UIDs except they increase monotonically, have no gaps,
     and only apply to real insns.  */

  max_uid = get_max_uid ();
  n = (max_uid + 1) * sizeof (int);
  uid_cuid = (int *) gmalloc (n);
  memset ((char *) uid_cuid, 0, n);
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	uid_cuid[INSN_UID (insn)] = i++;
      else
	uid_cuid[INSN_UID (insn)] = i;
    }

  /* Create a table mapping cuids to insns.  */

  max_cuid = i;
  n = (max_cuid + 1) * sizeof (rtx);
  cuid_insn = (rtx *) gmalloc (n);
  memset ((char *) cuid_insn, 0, n);
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      CUID_INSN (i++) = insn;

  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_XMALLOC ();

  /* Allocate vars to track sets of regs, memory per block.  */
  reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
						       max_gcse_regno);
  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
  canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
  memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
  memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
  modify_mem_list_set = BITMAP_XMALLOC ();
  canon_modify_mem_list_set = BITMAP_XMALLOC ();
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem ()
{
  BITMAP_XFREE (reg_set_bitmap);

  sbitmap_vector_free (reg_set_in_block);
  free_modify_mem_tables ();
  BITMAP_XFREE (modify_mem_list_set);
  BITMAP_XFREE (canon_modify_mem_list_set);
}
/* Many of the global optimization algorithms work by solving dataflow
   equations for various expressions.  Initially, some local value is
   computed for each expression in each block.  Then, the values across the
   various blocks are combined (by following flow graph edges) to arrive at
   global values.  Conceptually, each set of equations is independent.  We
   may therefore solve all the equations in parallel, solve them one at a
   time, or pick any intermediate approach.

   When you're going to need N two-dimensional bitmaps, each X (say, the
   number of blocks) by Y (say, the number of expressions), call this
   function.  It's not important what X and Y represent; only that Y
   correspond to the things that can be done in parallel.  This function will
   return an appropriate chunking factor C; you should solve C sets of
   equations in parallel.  By going through this function, we can easily
   trade space against time; by solving fewer equations in parallel we use
   less memory.  */

static int
get_bitmap_width (n, x, y)
     int n;
     int x;
     int y;
{
  /* It's not really worth figuring out *exactly* how much memory will
     be used by a particular choice.  The important thing is to get
     something approximately right.  */
  size_t max_bitmap_memory = 10 * 1024 * 1024;

  /* The number of bytes we'd use for a single column of minimum
     width.  */
  size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);

  /* Often, it's reasonable just to solve all the equations in
     parallel.  */
  if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
    return y;

  /* Otherwise, pick the largest width we can, without going over the
     limit.  */
  return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
			     / column_size);
}
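/* A worked example with assumed numbers: for n = 2 problems over x = 1000
   blocks and y = 60000 expressions, with an 8-byte SBITMAP_ELT_TYPE and
   SBITMAP_ELT_BITS of 64, one minimum-width column costs
   2 * 1000 * 8 = 16000 bytes.  Solving all 60000 equations at once would
   need about 16000 * ceil (60000 / 64) ~ 15 MB, which is over the 10 MB
   cap, so the function instead returns roughly
   64 * ceil (10485760 / 16000) = 64 * 656 = 41984 equations per chunk.  */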
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   SETP controls which hash table to look at.  If zero, this routine looks at
   the expr hash table; if nonzero this routine looks at the set hash table.
   Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (transp, comp, antloc, setp)
     sbitmap *transp;
     sbitmap *comp;
     sbitmap *antloc;
     int setp;
{
  unsigned int i, hash_table_size;
  struct expr **hash_table;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (setp)
	sbitmap_vector_zero (transp, n_basic_blocks);
      else
	sbitmap_vector_ones (transp, n_basic_blocks);
    }

  if (comp)
    sbitmap_vector_zero (comp, n_basic_blocks);
  if (antloc)
    sbitmap_vector_zero (antloc, n_basic_blocks);

  /* We use the same code for cprop, pre and hoisting.  For cprop
     we care about the set hash table, for pre and hoisting we
     care about the expr hash table.  */
  hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
  hash_table = setp ? set_hash_table : expr_hash_table;

  for (i = 0; i < hash_table_size; i++)
    {
      struct expr *expr;

      for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, setp);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to non-zero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to non-zero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
/* Register set information.

   `reg_set_table' records where each register is set or otherwise
   modified.  */

static struct obstack reg_set_obstack;

static void
alloc_reg_set_mem (n_regs)
     int n_regs;
{
  int n;

  reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
  n = reg_set_table_size * sizeof (struct reg_set *);
  reg_set_table = (struct reg_set **) gmalloc (n);
  memset ((char *) reg_set_table, 0, n);

  gcc_obstack_init (&reg_set_obstack);
}

static void
free_reg_set_mem ()
{
  free (reg_set_table);
  obstack_free (&reg_set_obstack, NULL);
}
/* Record REGNO in the reg_set table.  */

static void
record_one_set (regno, insn)
     int regno;
     rtx insn;
{
  /* Allocate a new reg_set element and link it onto the list.  */
  struct reg_set *new_reg_info;

  /* If the table isn't big enough, enlarge it.  */
  if (regno >= reg_set_table_size)
    {
      int new_size = regno + REG_SET_TABLE_SLOP;

      reg_set_table
	= (struct reg_set **) grealloc ((char *) reg_set_table,
					new_size * sizeof (struct reg_set *));
      memset ((char *) (reg_set_table + reg_set_table_size), 0,
	      (new_size - reg_set_table_size) * sizeof (struct reg_set *));
      reg_set_table_size = new_size;
    }

  new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
						   sizeof (struct reg_set));
  bytes_used += sizeof (struct reg_set);
  new_reg_info->insn = insn;
  new_reg_info->next = reg_set_table[regno];
  reg_set_table[regno] = new_reg_info;
}
/* Called from compute_sets via note_stores to handle one SET or CLOBBER in
   an insn.  The DATA is really the instruction in which the SET is
   occurring.  */

static void
record_set_info (dest, setter, data)
     rtx dest, setter ATTRIBUTE_UNUSED;
     void *data;
{
  rtx record_set_insn = (rtx) data;

  if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
    record_one_set (REGNO (dest), record_set_insn);
}

/* Scan the function and record each set of each pseudo-register.

   This is called once, at the start of the gcse pass.  See the comments for
   `reg_set_table' for further documentation.  */

static void
compute_sets (f)
     rtx f;
{
  rtx insn;

  for (insn = f; insn != 0; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      note_stores (PATTERN (insn), record_set_info, insn);
}
/* Hash table support.  */

/* For each register, the cuid of the first/last insn in the block
   that set it, or -1 if not set.  */
#define NEVER_SET -1

struct reg_avail_info
{
  int last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static int current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (x)
     rtx x;
{
  static rtx test_insn = 0;
  int num_clobbers = 0;
  int icode;

  switch (GET_CODE (x))

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
      ggc_add_rtx_root (&test_insn, 1);
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;
  return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
	  && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
}
/* Return non-zero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (x, insn, avail_p)
     rtx x, insn;
     int avail_p;
{
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < INSN_CUID (insn);
	else
	  return info->first_set >= INSN_CUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (BASIC_BLOCK (current_bb), INSN_CUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static rtx gcse_mem_operand;
/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (dest, setter, data)
     rtx dest, setter ATTRIBUTE_UNUSED;
     void *data ATTRIBUTE_UNUSED;
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == SIGN_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (GET_CODE (dest) != MEM)
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the CUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (bb, uid_limit, x, avail_p)
     basic_block bb;
     int uid_limit;
     rtx x;
     int avail_p;
{
  rtx list_entry = modify_mem_list[bb->index];

  while (list_entry)
    {
      rtx setter;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (GET_CODE (setter) == CALL_INSN)
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
/* Return non-zero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (x, insn)
     rtx x, insn;
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return non-zero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (x, insn)
     rtx x, insn;
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.

   ??? One might want to merge this with canon_hash.  Later.  */

static unsigned int
hash_expr (x, mode, do_not_record_p, hash_table_size)
     rtx x;
     enum machine_mode mode;
     int *do_not_record_p;
     int hash_table_size;
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_expr_1 (x, mode, do_not_record_p);
  return hash % hash_table_size;
}

/* Hash a string.  Just add its bytes up.  */

static inline unsigned
hash_string_1 (ps)
     const char *ps;
{
  unsigned hash = 0;
  const unsigned char *p = (const unsigned char *) ps;

  if (p)
    while (*p)
      hash += *p++;

  return hash;
}
/* Subroutine of hash_expr to do the actual work.  */

static unsigned int
hash_expr_1 (x, mode, do_not_record_p)
     rtx x;
     enum machine_mode mode;
     int *do_not_record_p;
{
  /* Used to turn recursion into iteration.  We can't rely on GCC's
     tail-recursion elimination since we need to keep accumulating values
     in HASH.  */

  code = GET_CODE (x);

      hash += ((unsigned int) REG << 7) + REGNO (x);

      hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
	       + (unsigned int) INTVAL (x));

      /* This is like the general case, except that it only counts
	 the integers representing the constant.  */
      hash += (unsigned int) code + (unsigned int) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
	for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
	  hash += (unsigned int) XWINT (x, i);
      else
	hash += ((unsigned int) CONST_DOUBLE_LOW (x)
		 + (unsigned int) CONST_DOUBLE_HIGH (x));

      units = CONST_VECTOR_NUNITS (x);

      for (i = 0; i < units; ++i)
	{
	  elt = CONST_VECTOR_ELT (x, i);
	  hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
	}

      /* Assume there is only one rtx object for any given label.  */
      /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
	 differences and differences between each stage's debugging dumps.  */
      hash += (((unsigned int) LABEL_REF << 7)
	       + CODE_LABEL_NUMBER (XEXP (x, 0)));

      /* Don't hash on the symbol's address to avoid bootstrap differences.
	 Different hash values may cause expressions to be recorded in
	 different orders and thus different registers to be used in the
	 final assembler.  This also avoids differences in the dump files
	 between various stages.  */
      {
	const unsigned char *p = (const unsigned char *) XSTR (x, 0);

	while (*p)
	  h += (h << 7) + *p++; /* ??? revisit */

	hash += ((unsigned int) SYMBOL_REF << 7) + h;
      }

      if (MEM_VOLATILE_P (x))
	{
	  *do_not_record_p = 1;
	  return 0;
	}

      hash += (unsigned int) MEM;
      hash += MEM_ALIAS_SET (x);

    case UNSPEC_VOLATILE:
      *do_not_record_p = 1;
      return 0;

      if (MEM_VOLATILE_P (x))
	{
	  *do_not_record_p = 1;
	  return 0;
	}

      /* We don't want to take the filename and line into account.  */
      hash += (unsigned) code + (unsigned) GET_MODE (x)
	+ hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
	+ hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
	+ (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);

      if (ASM_OPERANDS_INPUT_LENGTH (x))
	{
	  for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
	    hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
				  GET_MODE (ASM_OPERANDS_INPUT (x, i)),
				  do_not_record_p)
		     + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));

	  hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
	  x = ASM_OPERANDS_INPUT (x, 0);
	  mode = GET_MODE (x);
	}

  hash += (unsigned) code + (unsigned) GET_MODE (x);
  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      /* If we are about to do the last recursive call
	 needed at this level, change it into iteration.
	 This function is called enough to be worth it.  */

	  hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
	  if (*do_not_record_p)
	    return 0;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  {
	    hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
	    if (*do_not_record_p)
	      return 0;
	  }

      else if (fmt[i] == 's')
	hash += hash_string_1 (XSTR (x, i));
      else if (fmt[i] == 'i')
	hash += (unsigned int) XINT (x, i);
    }

  return hash;
}
/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (regno, hash_table_size)
     int regno;
     int hash_table_size;
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}
/* Return non-zero if exp1 is equivalent to exp2.
   ??? Borrowed from cse.c.  Might want to remerge with cse.c.  Later.  */

static int
expr_equiv_p (x, y)
     rtx x, y;
{
  if (x == y)
    return 1;

  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

      return INTVAL (x) == INTVAL (y);

      return XEXP (x, 0) == XEXP (y, 0);

      return XSTR (x, 0) == XSTR (y, 0);

      return REGNO (x) == REGNO (y);

      /* Can't merge two expressions in different alias sets, since we can
	 decide that the expression is transparent in a block when it isn't,
	 due to it being set with the different alias set.  */
      if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	return 0;

      /* For commutative operations, check both orders.  */
      return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
	       && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
	      || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
		  && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));

      /* We don't use the generic code below because we want to
	 disregard filename and line numbers.  */

      /* A volatile asm isn't equivalent to any other.  */
      if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
	return 0;

      if (GET_MODE (x) != GET_MODE (y)
	  || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
	  || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
		     ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
	  || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
	  || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
	return 0;

      if (ASM_OPERANDS_INPUT_LENGTH (x))
	{
	  for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	    if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
				ASM_OPERANDS_INPUT (y, i))
		|| strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
			   ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
	      return 0;
	}

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
	return 0;

      if (XVECLEN (x, i) != XVECLEN (y, i))
	return 0;
      for (j = 0; j < XVECLEN (x, i); j++)
	if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
	  return 0;

      if (strcmp (XSTR (x, i), XSTR (y, i)))
	return 0;

      if (XINT (x, i) != XINT (y, i))
	return 0;

      if (XWINT (x, i) != XWINT (y, i))
	return 0;
    }

  return 1;
}
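/* Illustrative usage only (the pseudo-register numbers are made up):
   per the commutativity check above, an expression such as
   (plus (reg A) (reg B)) is intended to compare as equivalent to
   (plus (reg B) (reg A)).  */
#if 0
static int
commutative_equiv_example ()
{
  rtx a = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx b = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER + 1);

  return expr_equiv_p (gen_rtx_PLUS (SImode, a, b),
		       gen_rtx_PLUS (SImode, b, a));
}
#endif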
1940 /* Insert expression X in INSN in the hash table.
1941    If it is already present, record it as the last occurrence in INSN's
1944    MODE is the mode of the value X is being stored into.
1945    It is only used if X is a CONST_INT.
1947    ANTIC_P is non-zero if X is an anticipatable expression.
1948    AVAIL_P is non-zero if X is an available expression.  */
1951 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1953      enum machine_mode mode;
1955      int antic_p, avail_p;
1957   int found, do_not_record_p;
1959   struct expr *cur_expr, *last_expr = NULL;
1960   struct occr *antic_occr, *avail_occr;
1961   struct occr *last_occr = NULL;
1963   hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1965   /* Do not insert expression in table if it contains volatile operands,
1966      or if hash_expr determines the expression is something we don't want
1967      to or can't handle.  */
1968   if (do_not_record_p)
1971   cur_expr = expr_hash_table[hash];
1974   while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1976       /* If the expression isn't found, save a pointer to the end of
1978       last_expr = cur_expr;
1979       cur_expr = cur_expr->next_same_hash;
1984       cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1985       bytes_used += sizeof (struct expr);
1986       if (expr_hash_table[hash] == NULL)
1987         /* This is the first pattern that hashed to this index.  */
1988         expr_hash_table[hash] = cur_expr;
1990         /* Add EXPR to end of this hash chain.  */
1991         last_expr->next_same_hash = cur_expr;
1993       /* Set the fields of the expr element.  */
1995       cur_expr->bitmap_index = n_exprs++;
1996       cur_expr->next_same_hash = NULL;
1997       cur_expr->antic_occr = NULL;
1998       cur_expr->avail_occr = NULL;
2001   /* Now record the occurrence(s).  */
2004       antic_occr = cur_expr->antic_occr;
2006       /* Search for another occurrence in the same basic block.  */
2007       while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
2009           /* If an occurrence isn't found, save a pointer to the end of
2011           last_occr = antic_occr;
2012           antic_occr = antic_occr->next;
2016         /* Found another instance of the expression in the same basic block.
2017            Prefer the currently recorded one.  We want the first one in the
2018            block and the block is scanned from start to end.  */
2019         ; /* nothing to do */
2022           /* First occurrence of this expression in this basic block.  */
2023           antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2024           bytes_used += sizeof (struct occr);
2025           /* First occurrence of this expression in any block?  */
2026           if (cur_expr->antic_occr == NULL)
2027             cur_expr->antic_occr = antic_occr;
2029             last_occr->next = antic_occr;
2031           antic_occr->insn = insn;
2032           antic_occr->next = NULL;
2038       avail_occr = cur_expr->avail_occr;
2040       /* Search for another occurrence in the same basic block.  */
2041       while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2043           /* If an occurrence isn't found, save a pointer to the end of
2045           last_occr = avail_occr;
2046           avail_occr = avail_occr->next;
2050         /* Found another instance of the expression in the same basic block.
2051            Prefer this occurrence to the currently recorded one.  We want
2052            the last one in the block and the block is scanned from start
2054         avail_occr->insn = insn;
2057           /* First occurrence of this expression in this basic block.  */
2058           avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2059           bytes_used += sizeof (struct occr);
2061           /* First occurrence of this expression in any block?  */
2062           if (cur_expr->avail_occr == NULL)
2063             cur_expr->avail_occr = avail_occr;
2065             last_occr->next = avail_occr;
2067           avail_occr->insn = insn;
2068           avail_occr->next = NULL;
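/* For illustration, the shape of the structure built above (hypothetical
   contents; field names as used in the code):

     expr_hash_table[hash] --> expr A --next_same_hash--> expr B --> NULL
                                |
                                +-- antic_occr: first occurrence per block
                                +-- avail_occr: last occurrence per block

   Each occurrence list has at most one node per basic block, linked through
   occr->next and pointing at the recorded insn via occr->insn.  */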
2073 /* Insert pattern X in INSN in the hash table.
2074    X is a SET of a reg to either another reg or a constant.
2075    If it is already present, record it as the last occurrence in INSN's
2079 insert_set_in_table (x, insn)
2085   struct expr *cur_expr, *last_expr = NULL;
2086   struct occr *cur_occr, *last_occr = NULL;
2088   if (GET_CODE (x) != SET
2089       || GET_CODE (SET_DEST (x)) != REG)
2092   hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
2094   cur_expr = set_hash_table[hash];
2097   while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2099       /* If the expression isn't found, save a pointer to the end of
2101       last_expr = cur_expr;
2102       cur_expr = cur_expr->next_same_hash;
2107       cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2108       bytes_used += sizeof (struct expr);
2109       if (set_hash_table[hash] == NULL)
2110         /* This is the first pattern that hashed to this index.  */
2111         set_hash_table[hash] = cur_expr;
2113         /* Add EXPR to end of this hash chain.  */
2114         last_expr->next_same_hash = cur_expr;
2116       /* Set the fields of the expr element.
2117          We must copy X because it can be modified when copy propagation is
2118          performed on its operands.  */
2119       cur_expr->expr = copy_rtx (x);
2120       cur_expr->bitmap_index = n_sets++;
2121       cur_expr->next_same_hash = NULL;
2122       cur_expr->antic_occr = NULL;
2123       cur_expr->avail_occr = NULL;
2126   /* Now record the occurrence.  */
2127   cur_occr = cur_expr->avail_occr;
2129   /* Search for another occurrence in the same basic block.  */
2130   while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2132       /* If an occurrence isn't found, save a pointer to the end of
2134       last_occr = cur_occr;
2135       cur_occr = cur_occr->next;
2139     /* Found another instance of the expression in the same basic block.
2140        Prefer this occurrence to the currently recorded one.  We want the
2141        last one in the block and the block is scanned from start to end.  */
2142     cur_occr->insn = insn;
2145       /* First occurrence of this expression in this basic block.  */
2146       cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2147       bytes_used += sizeof (struct occr);
2149       /* First occurrence of this expression in any block?  */
2150       if (cur_expr->avail_occr == NULL)
2151         cur_expr->avail_occr = cur_occr;
2153         last_occr->next = cur_occr;
2155       cur_occr->insn = insn;
2156       cur_occr->next = NULL;
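/* Example of entries that get recorded here (illustrative pseudo-RTL, the
   register numbers are made up):
       (set (reg 100) (const_int 42))   -- candidate for constant propagation
       (set (reg 101) (reg 100))        -- candidate for copy propagation
   Non-REG destinations and non-SET patterns are rejected by the early test
   at the top of this function.  */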
2160 /* Scan pattern PAT of INSN and add an entry to the hash table.  If SET_P is
2161    non-zero, this is for the assignment hash table, otherwise it is for the
2162    expression hash table.  */
2165 hash_scan_set (pat, insn, set_p)
2169   rtx src = SET_SRC (pat);
2170   rtx dest = SET_DEST (pat);
2173   if (GET_CODE (src) == CALL)
2174     hash_scan_call (src, insn);
2176   else if (GET_CODE (dest) == REG)
2178       unsigned int regno = REGNO (dest);
2181       /* If this is a single set and we are doing constant propagation,
2182          see if a REG_NOTE shows this equivalent to a constant.  */
2183       if (set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2184           && CONSTANT_P (XEXP (note, 0)))
2185         src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2187       /* Only record sets of pseudo-regs in the hash table.  */
2189           && regno >= FIRST_PSEUDO_REGISTER
2190           /* Don't GCSE something if we can't do a reg/reg copy.  */
2191           && can_copy_p[GET_MODE (dest)]
2192           /* Is SET_SRC something we want to gcse?  */
2193           && want_to_gcse_p (src)
2194           /* Don't CSE a nop.  */
2195           && ! set_noop_p (pat)
2196           /* Don't GCSE if it has attached REG_EQUIV note.
2197              At this point only function parameters should have
2198              REG_EQUIV notes, and if the argument slot is used somewhere
2199              explicitly, it means the address of the parameter has been taken,
2200              so we should not extend the lifetime of the pseudo.  */
2201           && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2202               || GET_CODE (XEXP (note, 0)) != MEM))
2204           /* An expression is not anticipatable if its operands are
2205              modified before this insn or if this is not the only SET in
2207           int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2208           /* An expression is not available if its operands are
2209              subsequently modified, including this insn.  It's also not
2210              available if this is a branch, because we can't insert
2211              a set after the branch.  */
2212           int avail_p = (oprs_available_p (src, insn)
2213                          && ! JUMP_P (insn));
2215           insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
2218       /* Record sets for constant/copy propagation.  */
2220                && regno >= FIRST_PSEUDO_REGISTER
2221                && ((GET_CODE (src) == REG
2222                     && REGNO (src) >= FIRST_PSEUDO_REGISTER
2223                     && can_copy_p[GET_MODE (dest)]
2224                     && REGNO (src) != regno)
2225                    || CONSTANT_P (src))
2226                /* A copy is not available if its src or dest is subsequently
2227                   modified.  Here we want to search from INSN+1 on, but
2228                   oprs_available_p searches from INSN on.  */
2229                && (insn == BLOCK_END (BLOCK_NUM (insn))
2230                    || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2231                        && oprs_available_p (pat, tmp))))
2232         insert_set_in_table (pat, insn);
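/* Illustrative example of the ANTIC_P/AVAIL_P distinction computed above
   (hypothetical register numbers):
       insn 1:  r2 = r3 + r4
       insn 2:  r3 = ...
   Within this block the expression r3+r4 is anticipatable at insn 1 (its
   operands are not modified before it) but it is not available at the end
   of the block, because r3 is modified by insn 2.  */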
2237 hash_scan_clobber (x, insn)
2238      rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2240   /* Currently nothing to do.  */
2244 hash_scan_call (x, insn)
2245      rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2247   /* Currently nothing to do.  */
2250 /* Process INSN and add hash table entries as appropriate.
2252    Only available expressions that set a single pseudo-reg are recorded.
2254    Single sets in a PARALLEL could be handled, but it's an extra complication
2255    that isn't dealt with right now.  The trick is handling the CLOBBERs that
2256    are also in the PARALLEL.  Later.
2258    If SET_P is non-zero, this is for the assignment hash table,
2259    otherwise it is for the expression hash table.
2260    If IN_LIBCALL_BLOCK nonzero, we are in a libcall block, and should
2261    not record any expressions.  */
2264 hash_scan_insn (insn, set_p, in_libcall_block)
2267      int in_libcall_block;
2269   rtx pat = PATTERN (insn);
2272   if (in_libcall_block)
2275   /* Pick out the sets of INSN and for other forms of instructions record
2276      what's been modified.  */
2278   if (GET_CODE (pat) == SET)
2279     hash_scan_set (pat, insn, set_p);
2280   else if (GET_CODE (pat) == PARALLEL)
2281     for (i = 0; i < XVECLEN (pat, 0); i++)
2283         rtx x = XVECEXP (pat, 0, i);
2285         if (GET_CODE (x) == SET)
2286           hash_scan_set (x, insn, set_p);
2287         else if (GET_CODE (x) == CLOBBER)
2288           hash_scan_clobber (x, insn);
2289         else if (GET_CODE (x) == CALL)
2290           hash_scan_call (x, insn);
2293   else if (GET_CODE (pat) == CLOBBER)
2294     hash_scan_clobber (pat, insn);
2295   else if (GET_CODE (pat) == CALL)
2296     hash_scan_call (pat, insn);
2300 dump_hash_table (file, name, table, table_size, total_size)
2303      struct expr **table;
2304      int table_size, total_size;
2307   /* Flattened out table, so it's printed in proper order.  */
2308   struct expr **flat_table;
2309   unsigned int *hash_val;
2313     = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
2314   hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
2316   for (i = 0; i < table_size; i++)
2317     for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
2319         flat_table[expr->bitmap_index] = expr;
2320         hash_val[expr->bitmap_index] = i;
2323   fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2324            name, table_size, total_size);
2326   for (i = 0; i < total_size; i++)
2327     if (flat_table[i] != 0)
2329         expr = flat_table[i];
2330         fprintf (file, "Index %d (hash value %d)\n  ",
2331                  expr->bitmap_index, hash_val[i]);
2332         print_rtl (file, expr->expr);
2333         fprintf (file, "\n");
2336   fprintf (file, "\n");
2342 /* Record register first/last/block set information for REGNO in INSN.
2344    first_set records the first place in the block where the register
2345    is set and is used to compute "anticipatability".
2347    last_set records the last place in the block where the register
2348    is set and is used to compute "availability".
2350    last_bb records the block for which first_set and last_set are
2351    valid, as a quick test to invalidate them.
2353    reg_set_in_block records whether the register is set in the block
2354    and is used to compute "transparency".  */
2357 record_last_reg_set_info (insn, regno)
2361   struct reg_avail_info *info = &reg_avail_info[regno];
2362   int cuid = INSN_CUID (insn);
2364   info->last_set = cuid;
2365   if (info->last_bb != current_bb)
2367       info->last_bb = current_bb;
2368       info->first_set = cuid;
2369       SET_BIT (reg_set_in_block[current_bb], regno);
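/* Worked example (hypothetical cuids): for a block whose insns have cuids
   10, 11 and 12 and which sets r5 at cuids 10 and 12, the record built above
   ends up as first_set = 10, last_set = 12, last_bb = current_bb, and bit r5
   is set in reg_set_in_block[current_bb].  An expression using r5 is then
   anticipatable only before cuid 10 and available only after cuid 12.  */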
2374 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2375    Note we store a pair of elements in the list, so they have to be
2376    taken off pairwise.  */
2379 canon_list_insert (dest, unused1, v_insn)
2380      rtx dest ATTRIBUTE_UNUSED;
2381      rtx unused1 ATTRIBUTE_UNUSED;
2384   rtx dest_addr, insn;
2386   while (GET_CODE (dest) == SUBREG
2387          || GET_CODE (dest) == ZERO_EXTRACT
2388          || GET_CODE (dest) == SIGN_EXTRACT
2389          || GET_CODE (dest) == STRICT_LOW_PART)
2390     dest = XEXP (dest, 0);
2392   /* If DEST is not a MEM, then it will not conflict with a load.  Note
2393      that function calls are assumed to clobber memory, but are handled
2396   if (GET_CODE (dest) != MEM)
2399   dest_addr = get_addr (XEXP (dest, 0));
2400   dest_addr = canon_rtx (dest_addr);
2401   insn = (rtx) v_insn;
2403   canon_modify_mem_list[BLOCK_NUM (insn)] =
2404     alloc_INSN_LIST (dest_addr, canon_modify_mem_list[BLOCK_NUM (insn)]);
2405   canon_modify_mem_list[BLOCK_NUM (insn)] =
2406     alloc_INSN_LIST (dest, canon_modify_mem_list[BLOCK_NUM (insn)]);
2407   bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
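/* For illustration: after two stores in the same block, the list built above
   looks like (most recent first, hypothetical names)
       canon_modify_mem_list[bb] = (mem2, addr2, mem1, addr1)
   i.e. each MEM is immediately followed by its canonicalized address, which
   is why consumers such as compute_transp read the entries pairwise.  */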
2410 /* Record memory modification information for INSN.  We do not actually care
2411    about the memory location(s) that are set, or even how they are set (consider
2412    a CALL_INSN).  We merely need to record which insns modify memory.  */
2415 record_last_mem_set_info (insn)
2418   /* load_killed_in_block_p will handle the case of calls clobbering
2420   modify_mem_list[BLOCK_NUM (insn)] =
2421     alloc_INSN_LIST (insn, modify_mem_list[BLOCK_NUM (insn)]);
2422   bitmap_set_bit (modify_mem_list_set, BLOCK_NUM (insn));
2424   if (GET_CODE (insn) == CALL_INSN)
2426       /* Note that traversals of this loop (other than for free-ing)
2427          will break after encountering a CALL_INSN.  So, there's no
2428          need to insert a pair of items, as canon_list_insert does.  */
2429       canon_modify_mem_list[BLOCK_NUM (insn)] =
2430         alloc_INSN_LIST (insn, canon_modify_mem_list[BLOCK_NUM (insn)]);
2431       bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
2434     note_stores (PATTERN (insn), canon_list_insert, (void *) insn);
2437 /* Called from compute_hash_table via note_stores to handle one
2438    SET or CLOBBER in an insn.  DATA is really the instruction in which
2439    the SET is taking place.  */
2442 record_last_set_info (dest, setter, data)
2443      rtx dest, setter ATTRIBUTE_UNUSED;
2446   rtx last_set_insn = (rtx) data;
2448   if (GET_CODE (dest) == SUBREG)
2449     dest = SUBREG_REG (dest);
2451   if (GET_CODE (dest) == REG)
2452     record_last_reg_set_info (last_set_insn, REGNO (dest));
2453   else if (GET_CODE (dest) == MEM
2454            /* Ignore pushes, they clobber nothing.  */
2455            && ! push_operand (dest, GET_MODE (dest)))
2456     record_last_mem_set_info (last_set_insn);
2459 /* Top level function to create an expression or assignment hash table.
2461    Expression entries are placed in the hash table if
2462    - they are of the form (set (pseudo-reg) src),
2463    - src is something we want to perform GCSE on,
2464    - none of the operands are subsequently modified in the block
2466    Assignment entries are placed in the hash table if
2467    - they are of the form (set (pseudo-reg) src),
2468    - src is something we want to perform const/copy propagation on,
2469    - none of the operands or target are subsequently modified in the block
2471    Currently src must be a pseudo-reg or a const_int.
2473    F is the first insn.
2474    SET_P is non-zero for computing the assignment hash table.  */
2477 compute_hash_table (set_p)
2482   /* While we compute the hash table we also compute a bit array of which
2483      registers are set in which blocks.
2484      ??? This isn't needed during const/copy propagation, but it's cheap to
2486   sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
2488   /* re-Cache any INSN_LIST nodes we have allocated.  */
2489   clear_modify_mem_tables ();
2490   /* Some working arrays used to track first and last set in each block.  */
2491   reg_avail_info = (struct reg_avail_info *)
2492     gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2494   for (i = 0; i < max_gcse_regno; ++i)
2495     reg_avail_info[i].last_bb = NEVER_SET;
2497   for (current_bb = 0; current_bb < n_basic_blocks; current_bb++)
2501       int in_libcall_block;
2503       /* First pass over the instructions records information used to
2504          determine when registers and memory are first and last set.
2505          ??? hard-reg reg_set_in_block computation
2506          could be moved to compute_sets since they currently don't change.  */
2508       for (insn = BLOCK_HEAD (current_bb);
2509            insn && insn != NEXT_INSN (BLOCK_END (current_bb));
2510            insn = NEXT_INSN (insn))
2512           if (! INSN_P (insn))
2515           if (GET_CODE (insn) == CALL_INSN)
2517               bool clobbers_all = false;
2518 #ifdef NON_SAVING_SETJMP
2519               if (NON_SAVING_SETJMP
2520                   && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2521                 clobbers_all = true;
2524               for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2526                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2527                   record_last_reg_set_info (insn, regno);
2532           note_stores (PATTERN (insn), record_last_set_info, insn);
2535       /* The next pass builds the hash table.  */
2537       for (insn = BLOCK_HEAD (current_bb), in_libcall_block = 0;
2538            insn && insn != NEXT_INSN (BLOCK_END (current_bb));
2539            insn = NEXT_INSN (insn))
2542           if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2543             in_libcall_block = 1;
2544           else if (set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2545             in_libcall_block = 0;
2546           hash_scan_insn (insn, set_p, in_libcall_block);
2547           if (!set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2548             in_libcall_block = 0;
2552   free (reg_avail_info);
2553   reg_avail_info = NULL;
2556 /* Allocate space for the set hash table.
2557    N_INSNS is the number of instructions in the function.
2558    It is used to determine the number of buckets to use.  */
2561 alloc_set_hash_table (n_insns)
2566   set_hash_table_size = n_insns / 4;
2567   if (set_hash_table_size < 11)
2568     set_hash_table_size = 11;
2570   /* Attempt to maintain efficient use of hash table.
2571      Making it an odd number is simplest for now.
2572      ??? Later take some measurements.  */
2573   set_hash_table_size |= 1;
2574   n = set_hash_table_size * sizeof (struct expr *);
2575   set_hash_table = (struct expr **) gmalloc (n);
2578 /* Free things allocated by alloc_set_hash_table.  */
2581 free_set_hash_table ()
2583   free (set_hash_table);
2586 /* Compute the hash table for doing copy/const propagation.  */
2589 compute_set_hash_table ()
2591   /* Initialize count of number of entries in hash table.  */
2593   memset ((char *) set_hash_table, 0,
2594           set_hash_table_size * sizeof (struct expr *));
2596   compute_hash_table (1);
2599 /* Allocate space for the expression hash table.
2600    N_INSNS is the number of instructions in the function.
2601    It is used to determine the number of buckets to use.  */
2604 alloc_expr_hash_table (n_insns)
2605      unsigned int n_insns;
2609   expr_hash_table_size = n_insns / 2;
2610   /* Make sure the amount is usable.  */
2611   if (expr_hash_table_size < 11)
2612     expr_hash_table_size = 11;
2614   /* Attempt to maintain efficient use of hash table.
2615      Making it an odd number is simplest for now.
2616      ??? Later take some measurements.  */
2617   expr_hash_table_size |= 1;
2618   n = expr_hash_table_size * sizeof (struct expr *);
2619   expr_hash_table = (struct expr **) gmalloc (n);
2622 /* Free things allocated by alloc_expr_hash_table.  */
2625 free_expr_hash_table ()
2627   free (expr_hash_table);
2630 /* Compute the hash table for doing GCSE.  */
2633 compute_expr_hash_table ()
2635   /* Initialize count of number of entries in hash table.  */
2637   memset ((char *) expr_hash_table, 0,
2638           expr_hash_table_size * sizeof (struct expr *));
2640   compute_hash_table (0);
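/* Sizing example (hypothetical numbers): for a function with 1000 insns the
   expression table gets 1000/2 = 500, forced odd -> 501 buckets, and the set
   table gets 1000/4 = 250 -> 251 buckets; both are clamped to a minimum of
   11 buckets for very small functions, per the code above.  */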
2643 /* Expression tracking support.  */
2645 /* Lookup pattern PAT in the expression table.
2646    The result is a pointer to the table entry, or NULL if not found.  */
2648 static struct expr *
2652   int do_not_record_p;
2653   unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2654                                  expr_hash_table_size);
2657   if (do_not_record_p)
2660   expr = expr_hash_table[hash];
2662   while (expr && ! expr_equiv_p (expr->expr, pat))
2663     expr = expr->next_same_hash;
2668 /* Lookup REGNO in the set table.  If PAT is non-NULL look for the entry that
2669    matches it, otherwise return the first entry for REGNO.  The result is a
2670    pointer to the table entry, or NULL if not found.  */
2672 static struct expr *
2673 lookup_set (regno, pat)
2677   unsigned int hash = hash_set (regno, set_hash_table_size);
2680   expr = set_hash_table[hash];
2684       while (expr && ! expr_equiv_p (expr->expr, pat))
2685         expr = expr->next_same_hash;
2689       while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2690         expr = expr->next_same_hash;
2696 /* Return the next entry for REGNO in list EXPR.  */
2698 static struct expr *
2699 next_set (regno, expr)
2704     expr = expr->next_same_hash;
2705   while (expr && REGNO (SET_DEST (expr->expr)) != regno);
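/* Usage sketch (illustrative only; mirrors how find_avail_set below walks
   the table) -- iterate every recorded SET of a given REGNO by chaining
   lookup_set and next_set.  */
#if 0
{
  struct expr *set;

  for (set = lookup_set (regno, NULL_RTX);
       set != NULL;
       set = next_set (regno, set))
    /* ... examine SET_SRC (set->expr) here ... */;
}
#endif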
2710 /* Clear canon_modify_mem_list and modify_mem_list tables.  */
2712 clear_modify_mem_tables ()
2716   EXECUTE_IF_SET_IN_BITMAP
2717     (modify_mem_list_set, 0, i,
2718      free_INSN_LIST_list (modify_mem_list + i));
2719   bitmap_clear (modify_mem_list_set);
2721   EXECUTE_IF_SET_IN_BITMAP
2722     (canon_modify_mem_list_set, 0, i,
2723      free_INSN_LIST_list (canon_modify_mem_list + i));
2724   bitmap_clear (canon_modify_mem_list_set);
2727 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set.  */
2730 free_modify_mem_tables ()
2732   clear_modify_mem_tables ();
2733   free (modify_mem_list);
2734   free (canon_modify_mem_list);
2735   modify_mem_list = 0;
2736   canon_modify_mem_list = 0;
2739 /* Reset tables used to keep track of what's still available [since the
2740    start of the block].  */
2743 reset_opr_set_tables ()
2745   /* Maintain a bitmap of which regs have been set since beginning of
2747   CLEAR_REG_SET (reg_set_bitmap);
2749   /* Also keep a record of the last instruction to modify memory.
2750      For now this is very trivial, we only record whether any memory
2751      location has been modified.  */
2752   clear_modify_mem_tables ();
2755 /* Return non-zero if the operands of X are not set before INSN in
2756    INSN's basic block.  */
2759 oprs_not_set_p (x, insn)
2769   code = GET_CODE (x);
2785       if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2786                                   INSN_CUID (insn), x, 0))
2789       return oprs_not_set_p (XEXP (x, 0), insn);
2792       return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2798   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2802           /* If we are about to do the last recursive call
2803              needed at this level, change it into iteration.
2804              This function is called enough to be worth it.  */
2806             return oprs_not_set_p (XEXP (x, i), insn);
2808           if (! oprs_not_set_p (XEXP (x, i), insn))
2811       else if (fmt[i] == 'E')
2812         for (j = 0; j < XVECLEN (x, i); j++)
2813           if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2820 /* Mark things set by a CALL.  */
2826   if (! CONST_OR_PURE_CALL_P (insn))
2827     record_last_mem_set_info (insn);
2830 /* Mark things set by a SET.  */
2833 mark_set (pat, insn)
2836   rtx dest = SET_DEST (pat);
2838   while (GET_CODE (dest) == SUBREG
2839          || GET_CODE (dest) == ZERO_EXTRACT
2840          || GET_CODE (dest) == SIGN_EXTRACT
2841          || GET_CODE (dest) == STRICT_LOW_PART)
2842     dest = XEXP (dest, 0);
2844   if (GET_CODE (dest) == REG)
2845     SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2846   else if (GET_CODE (dest) == MEM)
2847     record_last_mem_set_info (insn);
2849   if (GET_CODE (SET_SRC (pat)) == CALL)
2853 /* Record things set by a CLOBBER.  */
2856 mark_clobber (pat, insn)
2859   rtx clob = XEXP (pat, 0);
2861   while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2862     clob = XEXP (clob, 0);
2864   if (GET_CODE (clob) == REG)
2865     SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2867     record_last_mem_set_info (insn);
2870 /* Record things set by INSN.
2871    This data is used by oprs_not_set_p.  */
2874 mark_oprs_set (insn)
2877   rtx pat = PATTERN (insn);
2880   if (GET_CODE (pat) == SET)
2881     mark_set (pat, insn);
2882   else if (GET_CODE (pat) == PARALLEL)
2883     for (i = 0; i < XVECLEN (pat, 0); i++)
2885         rtx x = XVECEXP (pat, 0, i);
2887         if (GET_CODE (x) == SET)
2889         else if (GET_CODE (x) == CLOBBER)
2890           mark_clobber (x, insn);
2891         else if (GET_CODE (x) == CALL)
2895   else if (GET_CODE (pat) == CLOBBER)
2896     mark_clobber (pat, insn);
2897   else if (GET_CODE (pat) == CALL)
2902 /* Classic GCSE reaching definition support.  */
2904 /* Allocate reaching def variables.  */
2907 alloc_rd_mem (n_blocks, n_insns)
2908      int n_blocks, n_insns;
2910   rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2911   sbitmap_vector_zero (rd_kill, n_basic_blocks);
2913   rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2914   sbitmap_vector_zero (rd_gen, n_basic_blocks);
2916   reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2917   sbitmap_vector_zero (reaching_defs, n_basic_blocks);
2919   rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2920   sbitmap_vector_zero (rd_out, n_basic_blocks);
2923 /* Free reaching def variables.  */
2928   sbitmap_vector_free (rd_kill);
2929   sbitmap_vector_free (rd_gen);
2930   sbitmap_vector_free (reaching_defs);
2931   sbitmap_vector_free (rd_out);
2934 /* Add INSN to the kills of BB.  REGNO, set in BB, is killed by INSN.  */
2937 handle_rd_kill_set (insn, regno, bb)
2942   struct reg_set *this_reg;
2944   for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg->next)
2945     if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2946       SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2949 /* Compute the set of kill's for reaching definitions.  */
2959        For each set bit in `gen' of the block (i.e. each insn which
2960            generates a definition in the block)
2961          Call the reg set by the insn corresponding to that bit regx
2962          Look at the linked list starting at reg_set_table[regx]
2963          For each setting of regx in the linked list, which is not in
2965            Set the bit in `kill' corresponding to that insn.  */
2966   for (bb = 0; bb < n_basic_blocks; bb++)
2967     for (cuid = 0; cuid < max_cuid; cuid++)
2968       if (TEST_BIT (rd_gen[bb], cuid))
2970           rtx insn = CUID_INSN (cuid);
2971           rtx pat = PATTERN (insn);
2973           if (GET_CODE (insn) == CALL_INSN)
2975               for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2976                 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2977                   handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
2980           if (GET_CODE (pat) == PARALLEL)
2982               for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2984                   enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2986                   if ((code == SET || code == CLOBBER)
2987                       && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2988                     handle_rd_kill_set (insn,
2989                                         REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2993           else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2994             /* Each setting of this register outside of this block
2995                must be marked in the set of kills in this block.  */
2996             handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
3000 /* Compute the reaching definitions as in
3001    Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
3002    Chapter 10.  It is the same algorithm as used for computing available
3003    expressions but applied to the gens and kills of reaching definitions.  */
3008   int bb, changed, passes;
3010   for (bb = 0; bb < n_basic_blocks; bb++)
3011     sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
3018       for (bb = 0; bb < n_basic_blocks; bb++)
3020           sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
3021           changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
3022                                             reaching_defs[bb], rd_kill[bb]);
3028     fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
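/* The fixed point iterated above is the textbook formulation, evaluated
   with bitmaps (illustrative notation only):
       reaching_defs[b] = union over predecessors p of rd_out[p]
       rd_out[b]        = rd_gen[b] | (reaching_defs[b] & ~rd_kill[b])
   The loop repeats whole passes over the blocks until no rd_out changes.  */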
3031 /* Classic GCSE available expression support.  */
3033 /* Allocate memory for available expression computation.  */
3036 alloc_avail_expr_mem (n_blocks, n_exprs)
3037      int n_blocks, n_exprs;
3039   ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3040   sbitmap_vector_zero (ae_kill, n_basic_blocks);
3042   ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3043   sbitmap_vector_zero (ae_gen, n_basic_blocks);
3045   ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3046   sbitmap_vector_zero (ae_in, n_basic_blocks);
3048   ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3049   sbitmap_vector_zero (ae_out, n_basic_blocks);
3053 free_avail_expr_mem ()
3055   sbitmap_vector_free (ae_kill);
3056   sbitmap_vector_free (ae_gen);
3057   sbitmap_vector_free (ae_in);
3058   sbitmap_vector_free (ae_out);
3061 /* Compute the set of available expressions generated in each basic block.  */
3070   /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3071      This is all we have to do because an expression is not recorded if it
3072      is not available, and the only expressions we want to work with are the
3073      ones that are recorded.  */
3074   for (i = 0; i < expr_hash_table_size; i++)
3075     for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
3076       for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3077         SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3080 /* Return non-zero if expression X is killed in BB.  */
3083 expr_killed_p (x, bb)
3094   code = GET_CODE (x);
3098       return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3101       if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3104       return expr_killed_p (XEXP (x, 0), bb);
3122   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3126           /* If we are about to do the last recursive call
3127              needed at this level, change it into iteration.
3128              This function is called enough to be worth it.  */
3130             return expr_killed_p (XEXP (x, i), bb);
3131           else if (expr_killed_p (XEXP (x, i), bb))
3134       else if (fmt[i] == 'E')
3135         for (j = 0; j < XVECLEN (x, i); j++)
3136           if (expr_killed_p (XVECEXP (x, i, j), bb))
3143 /* Compute the set of available expressions killed in each basic block.  */
3146 compute_ae_kill (ae_gen, ae_kill)
3147      sbitmap *ae_gen, *ae_kill;
3153   for (bb = 0; bb < n_basic_blocks; bb++)
3154     for (i = 0; i < expr_hash_table_size; i++)
3155       for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
3157           /* Skip EXPR if generated in this block.  */
3158           if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
3161           if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
3162             SET_BIT (ae_kill[bb], expr->bitmap_index);
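/* Illustrative example (hypothetical numbers): if block 3 contains a set of
   r7 and the expression (plus (reg 7) (reg 8)) is not generated in block 3,
   the loop above sets ae_kill[3] for that expression's bitmap index, because
   expr_killed_p finds r7's bit set in reg_set_in_block[3].  */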
3166 /* Actually perform the Classic GCSE optimizations.  */
3168 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
3170    CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
3171    as a positive reach.  We want to do this when there are two computations
3172    of the expression in the block.
3174    VISITED is a pointer to a working buffer for tracking which BB's have
3175    been visited.  It is NULL for the top-level call.
3177    We treat reaching expressions that go through blocks containing the same
3178    reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
3179    2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3180    2 as not reaching.  The intent is to improve the probability of finding
3181    only one reaching expression and to reduce register lifetimes by picking
3182    the closest such expression.  */
3185 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3189      int check_self_loop;
3194   for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3196       basic_block pred_bb = pred->src;
3198       if (visited[pred_bb->index])
3199         /* This predecessor has already been visited.  Nothing to do.  */
3201       else if (pred_bb == bb)
3203           /* BB loops on itself.  */
3205               && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3206               && BLOCK_NUM (occr->insn) == pred_bb->index)
3209           visited[pred_bb->index] = 1;
3212       /* Ignore this predecessor if it kills the expression.  */
3213       else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3214         visited[pred_bb->index] = 1;
3216       /* Does this predecessor generate this expression?  */
3217       else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3219           /* Is this the occurrence we're looking for?
3220              Note that there's only one generating occurrence per block
3221              so we just need to check the block number.  */
3222           if (BLOCK_NUM (occr->insn) == pred_bb->index)
3225           visited[pred_bb->index] = 1;
3228       /* Neither gen nor kill.  */
3231           visited[pred_bb->index] = 1;
3232           if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3239   /* All paths have been checked.  */
3243 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3244    memory allocated for that function is returned.  */
3247 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3251      int check_self_loop;
3254   char *visited = (char *) xcalloc (n_basic_blocks, 1);
3256   rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3262 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3263    If there is more than one such instruction, return NULL.
3265    Called only by handle_avail_expr.  */
3268 computing_insn (expr, insn)
3272   basic_block bb = BLOCK_FOR_INSN (insn);
3274   if (expr->avail_occr->next == NULL)
3276       if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3277         /* The available expression is actually itself
3278            (i.e. a loop in the flow graph) so do nothing.  */
3281       /* (FIXME) Case that we found a pattern that was created by
3282          a substitution that took place.  */
3283       return expr->avail_occr->insn;
3287       /* Pattern is computed more than once.
3288          Search backwards from this insn to see how many of these
3289          computations actually reach this insn.  */
3291       rtx insn_computes_expr = NULL;
3294       for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3296           if (BLOCK_FOR_INSN (occr->insn) == bb)
3298               /* The expression is generated in this block.
3299                  The only time we care about this is when the expression
3300                  is generated later in the block [and thus there's a loop].
3301                  We let the normal cse pass handle the other cases.  */
3302               if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3303                   && expr_reaches_here_p (occr, expr, bb, 1))
3309                   insn_computes_expr = occr->insn;
3312           else if (expr_reaches_here_p (occr, expr, bb, 0))
3318               insn_computes_expr = occr->insn;
3322       if (insn_computes_expr == NULL)
3325       return insn_computes_expr;
3329 /* Return non-zero if the definition in DEF_INSN can reach INSN.
3330    Only called by can_disregard_other_sets.  */
3333 def_reaches_here_p (insn, def_insn)
3338   if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3341   if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3343       if (INSN_CUID (def_insn) < INSN_CUID (insn))
3345           if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3347           else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3348             reg = XEXP (PATTERN (def_insn), 0);
3349           else if (GET_CODE (PATTERN (def_insn)) == SET)
3350             reg = SET_DEST (PATTERN (def_insn));
3354           return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3363 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN.  The
3364    value returned is the number of definitions that reach INSN.  Returning a
3365    value of zero means that [maybe] more than one definition reaches INSN and
3366    the caller can't perform whatever optimization it is trying.  i.e. it is
3367    always safe to return zero.  */
3370 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3371      struct reg_set **addr_this_reg;
3375   int number_of_reaching_defs = 0;
3376   struct reg_set *this_reg;
3378   for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3379     if (def_reaches_here_p (insn, this_reg->insn))
3381         number_of_reaching_defs++;
3382         /* Ignore parallels for now.  */
3383         if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3387                  && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3388                      || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3389                                        SET_SRC (PATTERN (insn)))))
3390           /* A setting of the reg to a different value reaches INSN.  */
3393         if (number_of_reaching_defs > 1)
3395             /* If in this setting the value the register is being set to is
3396                equal to the previous value the register was set to and this
3397                setting reaches the insn we are trying to do the substitution
3398                on then we are ok.  */
3399             if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3401             else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3402                                     SET_SRC (PATTERN (insn))))
3406         *addr_this_reg = this_reg;
3409   return number_of_reaching_defs;
3412 /* Expression computed by insn is available and the substitution is legal,
3413    so try to perform the substitution.
3415    The result is non-zero if any changes were made.  */
3418 handle_avail_expr (insn, expr)
3422   rtx pat, insn_computes_expr, expr_set;
3424   struct reg_set *this_reg;
3425   int found_setting, use_src;
3428   /* We only handle the case where one computation of the expression
3429      reaches this instruction.  */
3430   insn_computes_expr = computing_insn (expr, insn);
3431   if (insn_computes_expr == NULL)
3433   expr_set = single_set (insn_computes_expr);
3440   /* At this point we know only one computation of EXPR outside of this
3441      block reaches this insn.  Now try to find a register that the
3442      expression is computed into.  */
3443   if (GET_CODE (SET_SRC (expr_set)) == REG)
3445       /* This is the case when the available expression that reaches
3446          here has already been handled as an available expression.  */
3447       unsigned int regnum_for_replacing
3448         = REGNO (SET_SRC (expr_set));
3450       /* If the register was created by GCSE we can't use `reg_set_table',
3451          however we know it's set only once.  */
3452       if (regnum_for_replacing >= max_gcse_regno
3453           /* If the register the expression is computed into is set only once,
3454              or only one set reaches this insn, we can use it.  */
3455           || (((this_reg = reg_set_table[regnum_for_replacing]),
3456                this_reg->next == NULL)
3457               || can_disregard_other_sets (&this_reg, insn, 0)))
3466       unsigned int regnum_for_replacing
3467         = REGNO (SET_DEST (expr_set));
3469       /* This shouldn't happen.  */
3470       if (regnum_for_replacing >= max_gcse_regno)
3473       this_reg = reg_set_table[regnum_for_replacing];
3475       /* If the register the expression is computed into is set only once,
3476          or only one set reaches this insn, use it.  */
3477       if (this_reg->next == NULL
3478           || can_disregard_other_sets (&this_reg, insn, 0))
3484       pat = PATTERN (insn);
3486         to = SET_SRC (expr_set);
3488         to = SET_DEST (expr_set);
3489       changed = validate_change (insn, &SET_SRC (pat), to, 0);
3491       /* We should be able to ignore the return code from validate_change but
3492          to play it safe we check.  */
3496           if (gcse_file != NULL)
3498               fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3500               fprintf (gcse_file, " reg %d %s insn %d\n",
3501                        REGNO (to), use_src ? "from" : "set in",
3502                        INSN_UID (insn_computes_expr));
3507   /* The register that the expr is computed into is set more than once.  */
3508   else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3510       /* Insert an insn after insnx that copies the reg set in insnx
3511          into a new pseudo register; call this new register REGN.
3512          From insnb until end of basic block or until REGB is set
3513          replace all uses of REGB with REGN.  */
3516       to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3518       /* Generate the new insn.  */
3519       /* ??? If the change fails, we return 0, even though we created
3520          an insn.  I think this is ok.  */
3522         = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3523                                         SET_DEST (expr_set)),
3524                            insn_computes_expr);
3526       /* Keep register set table up to date.  */
3527       record_one_set (REGNO (to), new_insn);
3529       gcse_create_count++;
3530       if (gcse_file != NULL)
3532           fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3533                    INSN_UID (NEXT_INSN (insn_computes_expr)),
3534                    REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3535           fprintf (gcse_file, ", computed in insn %d,\n",
3536                    INSN_UID (insn_computes_expr));
3537           fprintf (gcse_file, " into newly allocated reg %d\n",
3541       pat = PATTERN (insn);
3543       /* Do register replacement for INSN.  */
3544       changed = validate_change (insn, &SET_SRC (pat),
3546                                   (NEXT_INSN (insn_computes_expr))),
3549       /* We should be able to ignore the return code from validate_change but
3550          to play it safe we check.  */
3554           if (gcse_file != NULL)
3557                        "GCSE: Replacing the source in insn %d with reg %d ",
3559                        REGNO (SET_DEST (PATTERN (NEXT_INSN
3560                                                  (insn_computes_expr)))));
3561               fprintf (gcse_file, "set in insn %d\n",
3562                        INSN_UID (insn_computes_expr));
3570 /* Perform classic GCSE.  This is called by one_classic_gcse_pass after all
3571    the dataflow analysis has been done.
3573    The result is non-zero if a change was made.  */
3581   /* Note we start at block 1.  */
3584   for (bb = 1; bb < n_basic_blocks; bb++)
3586       /* Reset tables used to keep track of what's still valid [since the
3587          start of the block].  */
3588       reset_opr_set_tables ();
3590       for (insn = BLOCK_HEAD (bb);
3591            insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3592            insn = NEXT_INSN (insn))
3594           /* Is insn of form (set (pseudo-reg) ...)?  */
3595           if (GET_CODE (insn) == INSN
3596               && GET_CODE (PATTERN (insn)) == SET
3597               && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3598               && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3600               rtx pat = PATTERN (insn);
3601               rtx src = SET_SRC (pat);
3604               if (want_to_gcse_p (src)
3605                   /* Is the expression recorded?  */
3606                   && ((expr = lookup_expr (src)) != NULL)
3607                   /* Is the expression available [at the start of the
3609                   && TEST_BIT (ae_in[bb], expr->bitmap_index)
3610                   /* Are the operands unchanged since the start of the
3612                   && oprs_not_set_p (src, insn))
3613                 changed |= handle_avail_expr (insn, expr);
3616           /* Keep track of everything modified by this insn.  */
3617           /* ??? Need to be careful w.r.t. mods done to INSN.  */
3619             mark_oprs_set (insn);
3626 /* Top level routine to perform one classic GCSE pass.
3628    Return non-zero if a change was made.  */
3631 one_classic_gcse_pass (pass)
3636   gcse_subst_count = 0;
3637   gcse_create_count = 0;
3639   alloc_expr_hash_table (max_cuid);
3640   alloc_rd_mem (n_basic_blocks, max_cuid);
3641   compute_expr_hash_table ();
3643     dump_hash_table (gcse_file, "Expression", expr_hash_table,
3644                      expr_hash_table_size, n_exprs);
3650       alloc_avail_expr_mem (n_basic_blocks, n_exprs);
3652       compute_ae_kill (ae_gen, ae_kill);
3653       compute_available (ae_gen, ae_kill, ae_out, ae_in);
3654       changed = classic_gcse ();
3655       free_avail_expr_mem ();
3659   free_expr_hash_table ();
3663       fprintf (gcse_file, "\n");
3664       fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3665                current_function_name, pass, bytes_used, gcse_subst_count);
3666       fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3672 /* Compute copy/constant propagation working variables.  */
3674 /* Local properties of assignments.  */
3675 static sbitmap *cprop_pavloc;
3676 static sbitmap *cprop_absaltered;
3678 /* Global properties of assignments (computed from the local properties).  */
3679 static sbitmap *cprop_avin;
3680 static sbitmap *cprop_avout;
3682 /* Allocate vars used for copy/const propagation.  N_BLOCKS is the number of
3683    basic blocks.  N_SETS is the number of sets.  */
3686 alloc_cprop_mem (n_blocks, n_sets)
3687      int n_blocks, n_sets;
3689   cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3690   cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3692   cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3693   cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3696 /* Free vars used by copy/const propagation.  */
3701   sbitmap_vector_free (cprop_pavloc);
3702   sbitmap_vector_free (cprop_absaltered);
3703   sbitmap_vector_free (cprop_avin);
3704   sbitmap_vector_free (cprop_avout);
3707 /* For each block, compute whether X is transparent.  X is either an
3708    expression or an assignment [though we don't care which, for this context
3709    an assignment is treated as an expression].  For each block where an
3710    element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3714 compute_transp (x, indx, bmap, set_p)
3725   /* repeat is used to turn tail-recursion into iteration since GCC
3726      can't do it when there's no return value.  */
3732   code = GET_CODE (x);
3738       if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3740               for (bb = 0; bb < n_basic_blocks; bb++)
3741                 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3742                   SET_BIT (bmap[bb], indx);
3746               for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3747                 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3752           if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3754               for (bb = 0; bb < n_basic_blocks; bb++)
3755                 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3756                   RESET_BIT (bmap[bb], indx);
3760               for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3761                 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3768       for (bb = 0; bb < n_basic_blocks; bb++)
3770           rtx list_entry = canon_modify_mem_list[bb];
3774               rtx dest, dest_addr;
3776               if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3779                     SET_BIT (bmap[bb], indx);
3781                     RESET_BIT (bmap[bb], indx);
3784               /* LIST_ENTRY must be an INSN of some kind that sets memory.
3785                  Examine each hunk of memory that is modified.  */
3787               dest = XEXP (list_entry, 0);
3788               list_entry = XEXP (list_entry, 1);
3789               dest_addr = XEXP (list_entry, 0);
3791               if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3792                                          x, rtx_addr_varies_p))
3795                     SET_BIT (bmap[bb], indx);
3797                     RESET_BIT (bmap[bb], indx);
3800               list_entry = XEXP (list_entry, 1);
3823   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3827           /* If we are about to do the last recursive call
3828              needed at this level, change it into iteration.
3829              This function is called enough to be worth it.  */
3836             compute_transp (XEXP (x, i), indx, bmap, set_p);
3838       else if (fmt[i] == 'E')
3839         for (j = 0; j < XVECLEN (x, i); j++)
3840           compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
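/* Illustrative example: for an expression like (mem (reg 4)) with bitmap
   index INDX, any block whose canon_modify_mem_list records a store that may
   alias that MEM -- or any CALL_INSN -- has bit INDX set (when SET_P) or
   cleared (when !SET_P) in bmap[] by the loop above; the expression is not
   transparent through such a block.  */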
3844 /* Top level routine to do the dataflow analysis needed by copy/const
3848 compute_cprop_data ()
3850   compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3851   compute_available (cprop_pavloc, cprop_absaltered,
3852                      cprop_avout, cprop_avin);
3855 /* Copy/constant propagation.  */
3857 /* Maximum number of register uses in an insn that we handle.  */
3860 /* Table of uses found in an insn.
3861    Allocated statically to avoid alloc/free complexity and overhead.  */
3862 static struct reg_use reg_use_table[MAX_USES];
3864 /* Index into `reg_use_table' while building it.  */
3865 static int reg_use_count;
3867 /* Set up a list of register numbers used in INSN.  The found uses are stored
3868    in `reg_use_table'.  `reg_use_count' is initialized to zero before entry,
3869    and contains the number of uses in the table upon exit.
3871    ??? If a register appears multiple times we will record it multiple times.
3872    This doesn't hurt anything but it will slow things down.  */
3875 find_used_regs (xptr, data)
3877      void *data ATTRIBUTE_UNUSED;
3884   /* repeat is used to turn tail-recursion into iteration since GCC
3885      can't do it when there's no return value.  */
3890   code = GET_CODE (x);
3893       if (reg_use_count == MAX_USES)
3896       reg_use_table[reg_use_count].reg_rtx = x;
3900   /* Recursively scan the operands of this expression.  */
3902   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3906           /* If we are about to do the last recursive call
3907              needed at this level, change it into iteration.
3908              This function is called enough to be worth it.  */
3915             find_used_regs (&XEXP (x, i), data);
3917       else if (fmt[i] == 'E')
3918         for (j = 0; j < XVECLEN (x, i); j++)
3919           find_used_regs (&XVECEXP (x, i, j), data);
3923 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3924    Returns non-zero if successful.  */
3927 try_replace_reg (from, to, insn)
3930   rtx note = find_reg_equal_equiv_note (insn);
3933   rtx set = single_set (insn);
3935   success = validate_replace_src (from, to, insn);
3937   /* If above failed and this is a single set, try to simplify the source of
3938      the set given our substitution.  We could perhaps try this for multiple
3939      SETs, but it probably won't buy us anything.  */
3940   if (!success && set != 0)
3942       src = simplify_replace_rtx (SET_SRC (set), from, to);
3944       if (!rtx_equal_p (src, SET_SRC (set))
3945           && validate_change (insn, &SET_SRC (set), src, 0))
3949   /* If we've failed to do replacement, have a single SET, and don't already
3950      have a note, add a REG_EQUAL note to not lose information.  */
3951   if (!success && note == 0 && set != 0)
3952     note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3954   /* If there is already a NOTE, update the expression in it with our
3957     XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3959   /* REG_EQUAL may get simplified into register.
3960      We don't allow that.  Remove that note.  This code ought
3961      not to happen, because previous code ought to synthesize a
3962      reg-reg move, but be on the safe side.  */
3963   if (note && REG_P (XEXP (note, 0)))
3964     remove_note (insn, note);
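/* Illustrative example (hypothetical registers): when propagating the fact
   that r9 equals (const_int 5) into
       (set (reg 12) (plus (reg 9) (reg 10)))
   the validated change can fail if (plus (const_int 5) (reg 10)) is not a
   legal operand for the insn; the code above then records the simplified
   form in a REG_EQUAL note so later passes still see the known value.  */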
3969 /* Find a set of REGNOs that are available on entry to INSN's block.  Returns
3970    NULL if no such set is found.  */
3972 static struct expr *
3973 find_avail_set (regno, insn)
3977   /* SET1 contains the last set found that can be returned to the caller for
3978      use in a substitution.  */
3979   struct expr *set1 = 0;
3981   /* Loops are not possible here.  To get a loop we would need two sets
3982      available at the start of the block containing INSN.  i.e. we would
3983      need two sets like this available at the start of the block:
3985        (set (reg X) (reg Y))
3986        (set (reg Y) (reg X))
3988      This can not happen since the set of (reg Y) would have killed the
3989      set of (reg X) making it unavailable at the start of this block.  */
3993       struct expr *set = lookup_set (regno, NULL_RTX);
3995       /* Find a set that is available at the start of the block
3996          which contains INSN.  */
3999           if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
4001           set = next_set (regno, set);
4004       /* If no available set was found we've reached the end of the
4005          (possibly empty) copy chain.  */
4009       if (GET_CODE (set->expr) != SET)
4012       src = SET_SRC (set->expr);
4014       /* We know the set is available.
4015          Now check that SRC is ANTLOC (i.e. none of the source operands
4016          have changed since the start of the block).
4018          If the source operand changed, we may still use it for the next
4019          iteration of this loop, but we may not use it for substitutions.  */
4021       if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4024       /* If the source of the set is anything except a register, then
4025          we have reached the end of the copy chain.  */
4026       if (GET_CODE (src) != REG)
4029       /* Follow the copy chain, i.e. start another iteration of the loop
4030          and see if we have an available copy into SRC.  */
4031       regno = REGNO (src);
4034   /* SET1 holds the last set that was available and anticipatable at
4039 /* Subroutine of cprop_insn that tries to propagate constants into
4040    JUMP_INSNS.  INSN must be a conditional jump.  FROM is what we will try to
4041    replace, SRC is the constant we will try to substitute for it.  Returns
4042    nonzero if a change was made.  We know INSN has just a SET.  */
4045 cprop_jump (bb, insn, from, src)
4051   rtx set = PATTERN (insn);
4052   rtx new = simplify_replace_rtx (SET_SRC (set), from, src);
4054   /* If no simplification can be made, then try the next
4056   if (rtx_equal_p (new, SET_SRC (set)))
4059   /* If this is now a no-op delete it, otherwise this must be a valid insn.  */
4064       if (! validate_change (insn, &SET_SRC (set), new, 0))
4067       /* If this has turned into an unconditional jump,
4068          then put a barrier after it so that the unreachable
4069          code will be deleted.  */
4070       if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4071         emit_barrier_after (insn);
4074   run_jump_opt_after_gcse = 1;
4077   if (gcse_file != NULL)
4080                "CONST-PROP: Replacing reg %d in insn %d with constant ",
4081                REGNO (from), INSN_UID (insn));
4082       print_rtl (gcse_file, src);
4083       fprintf (gcse_file, "\n");
4085   purge_dead_edges (bb);
4092 /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS
4093    for machines that have CC0.  INSN is a single set that stores into CC0;
4094    the insn following it is a conditional jump.  REG_USED is the use we will
4095    try to replace, SRC is the constant we will try to substitute for it.
4096    Returns nonzero if a change was made.  */
4099 cprop_cc0_jump (bb, insn, reg_used, src)
4102      struct reg_use *reg_used;
4105   /* First substitute in the SET_SRC of INSN, then substitute that for
4107   rtx jump = NEXT_INSN (insn);
4108   rtx new_src = simplify_replace_rtx (SET_SRC (PATTERN (insn)),
4109                                       reg_used->reg_rtx, src);
4111   if (! cprop_jump (bb, jump, cc0_rtx, new_src))
4114   /* If we succeeded, delete the cc0 setter.  */
4121 /* Perform constant and copy propagation on INSN.
4122    The result is non-zero if a change was made.  */
4125 cprop_insn (bb, insn, alter_jumps)
4130   struct reg_use *reg_used;
4138   note_uses (&PATTERN (insn), find_used_regs, NULL);
4140   note = find_reg_equal_equiv_note (insn);
4142   /* We may win even when propagating constants into notes.  */
4144     find_used_regs (&XEXP (note, 0), NULL);
4146   for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4147        reg_used++, reg_use_count--)
4149       unsigned int regno = REGNO (reg_used->reg_rtx);
4153       /* Ignore registers created by GCSE.
4154          We do this because ...  */
4155       if (regno >= max_gcse_regno)
4158       /* If the register has already been set in this block, there's
4159          nothing we can do.  */
4160       if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4163       /* Find an assignment that sets reg_used and is available
4164          at the start of the block.  */
4165       set = find_avail_set (regno, insn);
4170       /* ??? We might be able to handle PARALLELs.  Later.  */
4171       if (GET_CODE (pat) != SET)
4174       src = SET_SRC (pat);
4176       /* Constant propagation.  */
4177       if (CONSTANT_P (src))
4179           /* Handle normal insns first.  */
4180           if (GET_CODE (insn) == INSN
4181               && try_replace_reg (reg_used->reg_rtx, src, insn))
4185               if (gcse_file != NULL)
4187                   fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
4189                   fprintf (gcse_file, "insn %d with constant ",
4191                   print_rtl (gcse_file, src);
4192                   fprintf (gcse_file, "\n");
4195               /* The original insn setting reg_used may or may not now be
4196                  deletable.  We leave the deletion to flow.  */
4199           /* Try to propagate a CONST_INT into a conditional jump.
4200              We're pretty specific about what we will handle in this
4201              code, we can extend this as necessary over time.
4203              Right now the insn in question must look like
4204              (set (pc) (if_then_else ...))  */
4205           else if (alter_jumps
4206                    && GET_CODE (insn) == JUMP_INSN
4207                    && condjump_p (insn)
4208                    && ! simplejump_p (insn))
4209             changed |= cprop_jump (bb, insn, reg_used->reg_rtx, src);
4212           /* Similar code for machines that use a pair of CC0 setter and
4213              conditional jump insn.  */
4214           else if (alter_jumps
4215                    && GET_CODE (PATTERN (insn)) == SET
4216                    && SET_DEST (PATTERN (insn)) == cc0_rtx
4217                    && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
4218                    && condjump_p (NEXT_INSN (insn))
4219                    && ! simplejump_p (NEXT_INSN (insn))
4220                    && cprop_cc0_jump (bb, insn, reg_used, src))
4227       else if (GET_CODE (src) == REG
4228                && REGNO (src) >= FIRST_PSEUDO_REGISTER
4229                && REGNO (src) != regno)
4231           if (try_replace_reg (reg_used->reg_rtx, src, insn))
4235               if (gcse_file != NULL)
4237                   fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
4238                            regno, INSN_UID (insn));
4239                   fprintf (gcse_file, " with reg %d\n", REGNO (src));
4242               /* The original insn setting reg_used may or may not now be
4243                  deletable.  We leave the deletion to flow.  */
4244               /* FIXME: If it turns out that the insn isn't deletable,
4245                  then we may have unnecessarily extended register lifetimes
4246                  and made things worse.  */
4254 /* Forward propagate copies.  This includes copies and constants.  Return
4255    non-zero if a change was made.  */
4264   /* Note we start at block 1.  */
4267   for (bb = 1; bb < n_basic_blocks; bb++)
4269       /* Reset tables used to keep track of what's still valid [since the
4270          start of the block].  */
4271       reset_opr_set_tables ();
4273       for (insn = BLOCK_HEAD (bb);
4274            insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
4275            insn = NEXT_INSN (insn))
4278             changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
4280             /* Keep track of everything modified by this insn.  */
4281             /* ??? Need to be careful w.r.t. mods done to INSN.  Don't
4282                call mark_oprs_set if we turned the insn into a NOTE.  */
4283             if (GET_CODE (insn) != NOTE)
4284               mark_oprs_set (insn);
4288   if (gcse_file != NULL)
4289     fprintf (gcse_file, "\n");
4294 /* Perform one copy/constant propagation pass.
4295    F is the first insn in the function.
4296    PASS is the pass count.  */
4299 one_cprop_pass (pass, alter_jumps)
4305   const_prop_count = 0;
4306   copy_prop_count = 0;
4308   alloc_set_hash_table (max_cuid);
4309   compute_set_hash_table ();
4311     dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
4315   alloc_cprop_mem (n_basic_blocks, n_sets);
4316   compute_cprop_data ();
4317   changed = cprop (alter_jumps);
4321   free_set_hash_table ();
4325       fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4326                current_function_name, pass, bytes_used);
4327       fprintf (gcse_file, "%d const props, %d copy props\n\n",
4328                const_prop_count, copy_prop_count);
/* Compute PRE+LCM working variables.  */

/* Local properties of expressions.  */

/* Nonzero for expressions that are transparent in the block.  */
static sbitmap *transp;

/* Nonzero for expressions that are transparent at the end of the block.
   This is only zero for expressions killed by an abnormal critical edge
   created by a call.  */
static sbitmap *transpout;
4345 /* Nonzero for expressions that are computed (available) in the block. */
4346 static sbitmap
*comp
;
4348 /* Nonzero for expressions that are locally anticipatable in the block. */
4349 static sbitmap
*antloc
;
/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;
4355 /* Nonzero for expressions which are redundant in a particular block. */
4356 static sbitmap
*pre_redundant
;
4358 /* Nonzero for expressions which should be inserted on a specific edge. */
4359 static sbitmap
*pre_insert_map
;
4361 /* Nonzero for expressions which should be deleted in a specific block. */
4362 static sbitmap
*pre_delete_map
;
4364 /* Contains the edge_list returned by pre_edge_lcm. */
4365 static struct edge_list
*edge_list
;
4367 /* Redundant insns. */
4368 static sbitmap pre_redundant_insns
;
4370 /* Allocate vars used for PRE analysis. */
4373 alloc_pre_mem (n_blocks
, n_exprs
)
4374 int n_blocks
, n_exprs
;
4376 transp
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
4377 comp
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
4378 antloc
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
4381 pre_redundant
= NULL
;
4382 pre_insert_map
= NULL
;
4383 pre_delete_map
= NULL
;
4386 ae_kill
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
4388 /* pre_insert and pre_delete are allocated later. */
4391 /* Free vars used for PRE analysis. */
4396 sbitmap_vector_free (transp
);
4397 sbitmap_vector_free (comp
);
4399 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4402 sbitmap_vector_free (pre_optimal
);
4404 sbitmap_vector_free (pre_redundant
);
4406 sbitmap_vector_free (pre_insert_map
);
4408 sbitmap_vector_free (pre_delete_map
);
4410 sbitmap_vector_free (ae_in
);
4412 sbitmap_vector_free (ae_out
);
4414 transp
= comp
= NULL
;
4415 pre_optimal
= pre_redundant
= pre_insert_map
= pre_delete_map
= NULL
;
4416 ae_in
= ae_out
= NULL
;
4419 /* Top level routine to do the dataflow analysis needed by PRE. */
4424 sbitmap trapping_expr
;
4428 compute_local_properties (transp
, comp
, antloc
, 0);
4429 sbitmap_vector_zero (ae_kill
, n_basic_blocks
);
4431 /* Collect expressions which might trap. */
4432 trapping_expr
= sbitmap_alloc (n_exprs
);
4433 sbitmap_zero (trapping_expr
);
4434 for (ui
= 0; ui
< expr_hash_table_size
; ui
++)
4437 for (e
= expr_hash_table
[ui
]; e
!= NULL
; e
= e
->next_same_hash
)
4438 if (may_trap_p (e
->expr
))
4439 SET_BIT (trapping_expr
, e
->bitmap_index
);
  /* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)

     This is significantly faster than compute_ae_kill.  */
4448 for (i
= 0; i
< n_basic_blocks
; i
++)
4452 /* If the current block is the destination of an abnormal edge, we
4453 kill all trapping expressions because we won't be able to properly
4454 place the instruction on the edge. So make them neither
4455 anticipatable nor transparent. This is fairly conservative. */
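  /* A hypothetical example: an expression such as (div (reg 60) (reg 61))
     may trap when reg 61 is zero.  Since we cannot split an abnormal edge to
     hold an inserted copy of such an expression, we simply drop it from
     ANTLOC and TRANSP for any block entered through an abnormal edge rather
     than risk evaluating it where the original program never would have.  */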
4456 for (e
= BASIC_BLOCK (i
)->pred
; e
; e
= e
->pred_next
)
4457 if (e
->flags
& EDGE_ABNORMAL
)
4459 sbitmap_difference (antloc
[i
], antloc
[i
], trapping_expr
);
4460 sbitmap_difference (transp
[i
], transp
[i
], trapping_expr
);
4464 sbitmap_a_or_b (ae_kill
[i
], transp
[i
], comp
[i
]);
4465 sbitmap_not (ae_kill
[i
], ae_kill
[i
]);
4468 edge_list
= pre_edge_lcm (gcse_file
, n_exprs
, transp
, comp
, antloc
,
4469 ae_kill
, &pre_insert_map
, &pre_delete_map
);
4470 sbitmap_vector_free (antloc
);
4472 sbitmap_vector_free (ae_kill
);
4474 sbitmap_free (trapping_expr
);
/* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
   block BB.

   VISITED is a pointer to a working buffer for tracking which BB's have
   been visited.  It is NULL for the top-level call.

   We treat reaching expressions that go through blocks containing the same
   reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
   2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
   2 as not reaching.  The intent is to improve the probability of finding
   only one reaching expression and to reduce register lifetimes by picking
   the closest such expression.  */
4493 pre_expr_reaches_here_p_work (occr_bb
, expr
, bb
, visited
)
4494 basic_block occr_bb
;
4501 for (pred
= bb
->pred
; pred
!= NULL
; pred
= pred
->pred_next
)
4503 basic_block pred_bb
= pred
->src
;
      if (pred->src == ENTRY_BLOCK_PTR
	  /* Has this predecessor already been visited?  */
	  || visited[pred_bb->index])
	;/* Nothing to do.  */
4510 /* Does this predecessor generate this expression? */
4511 else if (TEST_BIT (comp
[pred_bb
->index
], expr
->bitmap_index
))
4513 /* Is this the occurrence we're looking for?
4514 Note that there's only one generating occurrence per block
4515 so we just need to check the block number. */
4516 if (occr_bb
== pred_bb
)
4519 visited
[pred_bb
->index
] = 1;
4521 /* Ignore this predecessor if it kills the expression. */
4522 else if (! TEST_BIT (transp
[pred_bb
->index
], expr
->bitmap_index
))
4523 visited
[pred_bb
->index
] = 1;
4525 /* Neither gen nor kill. */
4528 visited
[pred_bb
->index
] = 1;
4529 if (pre_expr_reaches_here_p_work (occr_bb
, expr
, pred_bb
, visited
))
4534 /* All paths have been checked. */
4538 /* The wrapper for pre_expr_reaches_here_work that ensures that any
4539 memory allocated for that function is returned. */
4542 pre_expr_reaches_here_p (occr_bb
, expr
, bb
)
4543 basic_block occr_bb
;
4548 char *visited
= (char *) xcalloc (n_basic_blocks
, 1);
4550 rval
= pre_expr_reaches_here_p_work (occr_bb
, expr
, bb
, visited
);
/* Given an expr, generate RTL which we can insert at the end of a BB,
   or on an edge.  Set the block number of any insns generated to
   the value of BB.  */

process_insert_insn (expr)
4565 rtx reg
= expr
->reaching_reg
;
4566 rtx exp
= copy_rtx (expr
->expr
);
4571 /* If the expression is something that's an operand, like a constant,
4572 just copy it to a register. */
4573 if (general_operand (exp
, GET_MODE (reg
)))
4574 emit_move_insn (reg
, exp
);
4576 /* Otherwise, make a new insn to compute this expression and make sure the
4577 insn will be recognized (this also adds any needed CLOBBERs). Copy the
4578 expression to make sure we don't have any sharing issues. */
4579 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode
, reg
, exp
))))
4582 pat
= gen_sequence ();
/* Add EXPR to the end of basic block BB.

   This is used by both PRE and code hoisting.

   For PRE, we want to verify that the expr is either transparent
   or locally anticipatable in the target block.  This check makes
   no sense for code hoisting.  */
4597 insert_insn_end_bb (expr
, bb
, pre
)
4604 rtx reg
= expr
->reaching_reg
;
4605 int regno
= REGNO (reg
);
4609 pat
= process_insert_insn (expr
);
4611 /* If the last insn is a jump, insert EXPR in front [taking care to
4612 handle cc0, etc. properly]. */
4614 if (GET_CODE (insn
) == JUMP_INSN
)
4620 /* If this is a jump table, then we can't insert stuff here. Since
4621 we know the previous real insn must be the tablejump, we insert
4622 the new instruction just before the tablejump. */
4623 if (GET_CODE (PATTERN (insn
)) == ADDR_VEC
4624 || GET_CODE (PATTERN (insn
)) == ADDR_DIFF_VEC
)
4625 insn
= prev_real_insn (insn
);
4628 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4629 if cc0 isn't set. */
4630 note
= find_reg_note (insn
, REG_CC_SETTER
, NULL_RTX
);
4632 insn
= XEXP (note
, 0);
4635 rtx maybe_cc0_setter
= prev_nonnote_insn (insn
);
4636 if (maybe_cc0_setter
4637 && INSN_P (maybe_cc0_setter
)
4638 && sets_cc0_p (PATTERN (maybe_cc0_setter
)))
4639 insn
= maybe_cc0_setter
;
4642 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4643 new_insn
= emit_insn_before (pat
, insn
);
4646 /* Likewise if the last insn is a call, as will happen in the presence
4647 of exception handling. */
4648 else if (GET_CODE (insn
) == CALL_INSN
)
      /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
	 we search backward and place the instructions before the first
	 parameter is loaded.  Do this for everyone for consistency and a
	 presumption that we'll get better code elsewhere as well.

	 It should always be the case that we can put these instructions
	 anywhere in the basic block when performing PRE optimizations.  */
4660 && !TEST_BIT (antloc
[bb
->index
], expr
->bitmap_index
)
4661 && !TEST_BIT (transp
[bb
->index
], expr
->bitmap_index
))
4664 /* Since different machines initialize their parameter registers
4665 in different orders, assume nothing. Collect the set of all
4666 parameter registers. */
4667 insn
= find_first_parameter_load (insn
, bb
->head
);
4669 /* If we found all the parameter loads, then we want to insert
4670 before the first parameter load.
4672 If we did not find all the parameter loads, then we might have
4673 stopped on the head of the block, which could be a CODE_LABEL.
4674 If we inserted before the CODE_LABEL, then we would be putting
4675 the insn in the wrong basic block. In that case, put the insn
4676 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4677 while (GET_CODE (insn
) == CODE_LABEL
4678 || NOTE_INSN_BASIC_BLOCK_P (insn
))
4679 insn
= NEXT_INSN (insn
);
4681 new_insn
= emit_insn_before (pat
, insn
);
4684 new_insn
= emit_insn_after (pat
, insn
);
4686 /* Keep block number table up to date.
4687 Note, PAT could be a multiple insn sequence, we have to make
4688 sure that each insn in the sequence is handled. */
4689 if (GET_CODE (pat
) == SEQUENCE
)
4691 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
4693 rtx insn
= XVECEXP (pat
, 0, i
);
4695 add_label_notes (PATTERN (insn
), new_insn
);
4697 note_stores (PATTERN (insn
), record_set_info
, insn
);
4702 add_label_notes (pat
, new_insn
);
4704 /* Keep register set table up to date. */
4705 record_one_set (regno
, new_insn
);
4708 gcse_create_count
++;
4712 fprintf (gcse_file
, "PRE/HOIST: end of bb %d, insn %d, ",
4713 bb
->index
, INSN_UID (new_insn
));
4714 fprintf (gcse_file
, "copying expression %d to reg %d\n",
4715 expr
->bitmap_index
, regno
);
4719 /* Insert partially redundant expressions on edges in the CFG to make
4720 the expressions fully redundant. */
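/* A sketch of the idea (illustrative only): if a join block computes x+y and
   the expression is already available along one incoming path but not the
   other, placing a copy of x+y on the edge from the path that lacks it makes
   the computation in the join block fully redundant, so pre_delete can
   replace it with a use of the reaching register.  */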
4723 pre_edge_insert (edge_list
, index_map
)
4724 struct edge_list
*edge_list
;
4725 struct expr
**index_map
;
4727 int e
, i
, j
, num_edges
, set_size
, did_insert
= 0;
4730 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4731 if it reaches any of the deleted expressions. */
4733 set_size
= pre_insert_map
[0]->size
;
4734 num_edges
= NUM_EDGES (edge_list
);
4735 inserted
= sbitmap_vector_alloc (num_edges
, n_exprs
);
4736 sbitmap_vector_zero (inserted
, num_edges
);
4738 for (e
= 0; e
< num_edges
; e
++)
4741 basic_block bb
= INDEX_EDGE_PRED_BB (edge_list
, e
);
4743 for (i
= indx
= 0; i
< set_size
; i
++, indx
+= SBITMAP_ELT_BITS
)
4745 SBITMAP_ELT_TYPE insert
= pre_insert_map
[e
]->elms
[i
];
4747 for (j
= indx
; insert
&& j
< n_exprs
; j
++, insert
>>= 1)
4748 if ((insert
& 1) != 0 && index_map
[j
]->reaching_reg
!= NULL_RTX
)
4750 struct expr
*expr
= index_map
[j
];
4753 /* Now look at each deleted occurrence of this expression. */
4754 for (occr
= expr
->antic_occr
; occr
!= NULL
; occr
= occr
->next
)
4756 if (! occr
->deleted_p
)
		  /* Insert this expression on this edge if it would
		     reach the deleted occurrence in BB.  */
4761 if (!TEST_BIT (inserted
[e
], j
))
4764 edge eg
= INDEX_EDGE (edge_list
, e
);
		      /* We can't insert anything on an abnormal and
			 critical edge, so we insert the insn at the end of
			 the previous block.  There are several alternatives
			 detailed in Morgan's book P277 (sec 10.5) for
			 handling this situation.  This one is easiest for
			 now.  */

		      if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
4774 insert_insn_end_bb (index_map
[j
], bb
, 0);
4777 insn
= process_insert_insn (index_map
[j
]);
4778 insert_insn_on_edge (insn
, eg
);
4783 fprintf (gcse_file
, "PRE/HOIST: edge (%d,%d), ",
4785 INDEX_EDGE_SUCC_BB (edge_list
, e
)->index
);
4786 fprintf (gcse_file
, "copy expression %d\n",
4787 expr
->bitmap_index
);
4790 update_ld_motion_stores (expr
);
4791 SET_BIT (inserted
[e
], j
);
4793 gcse_create_count
++;
4800 sbitmap_vector_free (inserted
);
4804 /* Copy the result of INSN to REG. INDX is the expression number. */
4807 pre_insert_copy_insn (expr
, insn
)
4811 rtx reg
= expr
->reaching_reg
;
4812 int regno
= REGNO (reg
);
4813 int indx
= expr
->bitmap_index
;
4814 rtx set
= single_set (insn
);
4820 new_insn
= emit_insn_after (gen_move_insn (reg
, SET_DEST (set
)), insn
);
4822 /* Keep register set table up to date. */
4823 record_one_set (regno
, new_insn
);
4825 gcse_create_count
++;
4829 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4830 BLOCK_NUM (insn
), INSN_UID (new_insn
), indx
,
4831 INSN_UID (insn
), regno
);
4832 update_ld_motion_stores (expr
);
4835 /* Copy available expressions that reach the redundant expression
4836 to `reaching_reg'. */
4839 pre_insert_copies ()
4846 /* For each available expression in the table, copy the result to
4847 `reaching_reg' if the expression reaches a deleted one.
4849 ??? The current algorithm is rather brute force.
4850 Need to do some profiling. */
4852 for (i
= 0; i
< expr_hash_table_size
; i
++)
4853 for (expr
= expr_hash_table
[i
]; expr
!= NULL
; expr
= expr
->next_same_hash
)
4855 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4856 we don't want to insert a copy here because the expression may not
4857 really be redundant. So only insert an insn if the expression was
4858 deleted. This test also avoids further processing if the
4859 expression wasn't deleted anywhere. */
4860 if (expr
->reaching_reg
== NULL
)
4863 for (occr
= expr
->antic_occr
; occr
!= NULL
; occr
= occr
->next
)
4865 if (! occr
->deleted_p
)
4868 for (avail
= expr
->avail_occr
; avail
!= NULL
; avail
= avail
->next
)
4870 rtx insn
= avail
->insn
;
4872 /* No need to handle this one if handled already. */
4873 if (avail
->copied_p
)
4876 /* Don't handle this one if it's a redundant one. */
4877 if (TEST_BIT (pre_redundant_insns
, INSN_CUID (insn
)))
4880 /* Or if the expression doesn't reach the deleted one. */
4881 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail
->insn
),
4883 BLOCK_FOR_INSN (occr
->insn
)))
4886 /* Copy the result of avail to reaching_reg. */
4887 pre_insert_copy_insn (expr
, insn
);
4888 avail
->copied_p
= 1;
/* Delete redundant computations.
   Deletion is done by changing the insn to copy the `reaching_reg' of
   the expression into the result of the SET.  It is left to later passes
   (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.

   Returns non-zero if a change is made.  */
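/* For instance (illustrative only, register numbers are made up): a redundant

     (set (reg 65) (plus (reg 60) (reg 61)))

   is rewritten as

     (set (reg 65) (reg 120))

   where reg 120 is EXPR->reaching_reg holding the value of the expression;
   later passes are expected to propagate or coalesce the copy away.  */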
4910 for (i
= 0; i
< expr_hash_table_size
; i
++)
4911 for (expr
= expr_hash_table
[i
]; expr
!= NULL
; expr
= expr
->next_same_hash
)
4913 int indx
= expr
->bitmap_index
;
4915 /* We only need to search antic_occr since we require
4918 for (occr
= expr
->antic_occr
; occr
!= NULL
; occr
= occr
->next
)
4920 rtx insn
= occr
->insn
;
4922 basic_block bb
= BLOCK_FOR_INSN (insn
);
4924 if (TEST_BIT (pre_delete_map
[bb
->index
], indx
))
4926 set
= single_set (insn
);
4930 /* Create a pseudo-reg to store the result of reaching
4931 expressions into. Get the mode for the new pseudo from
4932 the mode of the original destination pseudo. */
4933 if (expr
->reaching_reg
== NULL
)
4935 = gen_reg_rtx (GET_MODE (SET_DEST (set
)));
4937 /* In theory this should never fail since we're creating
4940 However, on the x86 some of the movXX patterns actually
4941 contain clobbers of scratch regs. This may cause the
4942 insn created by validate_change to not match any pattern
4943 and thus cause validate_change to fail. */
4944 if (validate_change (insn
, &SET_SRC (set
),
4945 expr
->reaching_reg
, 0))
4947 occr
->deleted_p
= 1;
4948 SET_BIT (pre_redundant_insns
, INSN_CUID (insn
));
4956 "PRE: redundant insn %d (expression %d) in ",
4957 INSN_UID (insn
), indx
);
4958 fprintf (gcse_file
, "bb %d, reaching reg is %d\n",
4959 bb
->index
, REGNO (expr
->reaching_reg
));
/* Perform GCSE optimizations using PRE.
   This is called by one_pre_gcse_pass after all the dataflow analysis
   has been done.

   This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
   lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
   Compiler Design and Implementation.
4976 ??? A new pseudo reg is created to hold the reaching expression. The nice
4977 thing about the classical approach is that it would try to use an existing
4978 reg. If the register can't be adequately optimized [i.e. we introduce
4979 reload problems], one could add a pass here to propagate the new register
   ??? We don't handle single sets in PARALLELs because we're [currently] not
   able to copy the rest of the parallel when we insert copies to create full
   redundancies from partial redundancies.  However, there's no reason why we
   can't handle PARALLELs in the cases where there are no partial
   redundancies.  */
4992 int did_insert
, changed
;
4993 struct expr
**index_map
;
4996 /* Compute a mapping from expression number (`bitmap_index') to
4997 hash table entry. */
4999 index_map
= (struct expr
**) xcalloc (n_exprs
, sizeof (struct expr
*));
5000 for (i
= 0; i
< expr_hash_table_size
; i
++)
5001 for (expr
= expr_hash_table
[i
]; expr
!= NULL
; expr
= expr
->next_same_hash
)
5002 index_map
[expr
->bitmap_index
] = expr
;
5004 /* Reset bitmap used to track which insns are redundant. */
5005 pre_redundant_insns
= sbitmap_alloc (max_cuid
);
5006 sbitmap_zero (pre_redundant_insns
);
5008 /* Delete the redundant insns first so that
5009 - we know what register to use for the new insns and for the other
5010 ones with reaching expressions
5011 - we know which insns are redundant when we go to create copies */
5013 changed
= pre_delete ();
5015 did_insert
= pre_edge_insert (edge_list
, index_map
);
5017 /* In other places with reaching expressions, copy the expression to the
5018 specially allocated pseudo-reg that reaches the redundant expr. */
5019 pre_insert_copies ();
5022 commit_edge_insertions ();
5027 sbitmap_free (pre_redundant_insns
);
5031 /* Top level routine to perform one PRE GCSE pass.
5033 Return non-zero if a change was made. */
5036 one_pre_gcse_pass (pass
)
5041 gcse_subst_count
= 0;
5042 gcse_create_count
= 0;
5044 alloc_expr_hash_table (max_cuid
);
5045 add_noreturn_fake_exit_edges ();
5047 compute_ld_motion_mems ();
5049 compute_expr_hash_table ();
5050 trim_ld_motion_mems ();
5052 dump_hash_table (gcse_file
, "Expression", expr_hash_table
,
5053 expr_hash_table_size
, n_exprs
);
5057 alloc_pre_mem (n_basic_blocks
, n_exprs
);
5058 compute_pre_data ();
5059 changed
|= pre_gcse ();
5060 free_edge_list (edge_list
);
5065 remove_fake_edges ();
5066 free_expr_hash_table ();
5070 fprintf (gcse_file
, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5071 current_function_name
, pass
, bytes_used
);
5072 fprintf (gcse_file
, "%d substs, %d insns created\n",
5073 gcse_subst_count
, gcse_create_count
);
5079 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5080 If notes are added to an insn which references a CODE_LABEL, the
5081 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5082 because the following loop optimization pass requires them. */
5084 /* ??? This is very similar to the loop.c add_label_notes function. We
5085 could probably share code here. */
5087 /* ??? If there was a jump optimization pass after gcse and before loop,
5088 then we would not need to do this here, because jump would add the
5089 necessary REG_LABEL notes. */
5092 add_label_notes (x
, insn
)
5096 enum rtx_code code
= GET_CODE (x
);
5100 if (code
== LABEL_REF
&& !LABEL_REF_NONLOCAL_P (x
))
      /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */
5108 REG_NOTES (insn
) = gen_rtx_INSN_LIST (REG_LABEL
, XEXP (x
, 0),
5110 if (LABEL_P (XEXP (x
, 0)))
5111 LABEL_NUSES (XEXP (x
, 0))++;
5115 for (i
= GET_RTX_LENGTH (code
) - 1, fmt
= GET_RTX_FORMAT (code
); i
>= 0; i
--)
5118 add_label_notes (XEXP (x
, i
), insn
);
5119 else if (fmt
[i
] == 'E')
5120 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
5121 add_label_notes (XVECEXP (x
, i
, j
), insn
);
5125 /* Compute transparent outgoing information for each block.
5127 An expression is transparent to an edge unless it is killed by
5128 the edge itself. This can only happen with abnormal control flow,
5129 when the edge is traversed through a call. This happens with
5130 non-local labels and exceptions.
5132 This would not be necessary if we split the edge. While this is
5133 normally impossible for abnormal critical edges, with some effort
5134 it should be possible with exception handling, since we still have
5135 control over which handler should be invoked. But due to increased
5136 EH table sizes, this may not be worthwhile. */
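/* Concretely (a sketch, not an exhaustive rule): a MEM whose address is not a
   constant-pool reference may be written by the function called at the end of
   the block, so the bit for that expression is cleared in TRANSPOUT for that
   block; register expressions and constant-pool loads stay transparent.  */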
5139 compute_transpout ()
5145 sbitmap_vector_ones (transpout
, n_basic_blocks
);
5147 for (bb
= 0; bb
< n_basic_blocks
; ++bb
)
      /* Note that flow inserted a nop at the end of basic blocks that
	 end in call instructions for reasons other than abnormal
	 control flow.  */
      if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
5155 for (i
= 0; i
< expr_hash_table_size
; i
++)
5156 for (expr
= expr_hash_table
[i
]; expr
; expr
= expr
->next_same_hash
)
5157 if (GET_CODE (expr
->expr
) == MEM
)
5159 if (GET_CODE (XEXP (expr
->expr
, 0)) == SYMBOL_REF
5160 && CONSTANT_POOL_ADDRESS_P (XEXP (expr
->expr
, 0)))
	    /* ??? Optimally, we would use interprocedural alias
	       analysis to determine if this mem is actually killed
	       by this call.  */
	    RESET_BIT (transpout[bb], expr->bitmap_index);
5171 /* Removal of useless null pointer checks */
5173 /* Called via note_stores. X is set by SETTER. If X is a register we must
5174 invalidate nonnull_local and set nonnull_killed. DATA is really a
5175 `null_pointer_info *'.
5177 We ignore hard registers. */
5180 invalidate_nonnull_info (x
, setter
, data
)
5182 rtx setter ATTRIBUTE_UNUSED
;
5186 struct null_pointer_info
*npi
= (struct null_pointer_info
*) data
;
5188 while (GET_CODE (x
) == SUBREG
)
5191 /* Ignore anything that is not a register or is a hard register. */
5192 if (GET_CODE (x
) != REG
5193 || REGNO (x
) < npi
->min_reg
5194 || REGNO (x
) >= npi
->max_reg
)
5197 regno
= REGNO (x
) - npi
->min_reg
;
5199 RESET_BIT (npi
->nonnull_local
[npi
->current_block
], regno
);
5200 SET_BIT (npi
->nonnull_killed
[npi
->current_block
], regno
);
5203 /* Do null-pointer check elimination for the registers indicated in
5204 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5205 they are not our responsibility to free. */
5208 delete_null_pointer_checks_1 (block_reg
, nonnull_avin
,
5210 unsigned int *block_reg
;
5211 sbitmap
*nonnull_avin
;
5212 sbitmap
*nonnull_avout
;
5213 struct null_pointer_info
*npi
;
5217 sbitmap
*nonnull_local
= npi
->nonnull_local
;
5218 sbitmap
*nonnull_killed
= npi
->nonnull_killed
;
5220 /* Compute local properties, nonnull and killed. A register will have
5221 the nonnull property if at the end of the current block its value is
5222 known to be nonnull. The killed property indicates that somewhere in
5223 the block any information we had about the register is killed.
5225 Note that a register can have both properties in a single block. That
5226 indicates that it's killed, then later in the block a new value is
5228 sbitmap_vector_zero (nonnull_local
, n_basic_blocks
);
5229 sbitmap_vector_zero (nonnull_killed
, n_basic_blocks
);
5231 for (current_block
= 0; current_block
< n_basic_blocks
; current_block
++)
5233 rtx insn
, stop_insn
;
5235 /* Set the current block for invalidate_nonnull_info. */
5236 npi
->current_block
= current_block
;
5238 /* Scan each insn in the basic block looking for memory references and
5240 stop_insn
= NEXT_INSN (BLOCK_END (current_block
));
5241 for (insn
= BLOCK_HEAD (current_block
);
5243 insn
= NEXT_INSN (insn
))
5248 /* Ignore anything that is not a normal insn. */
5249 if (! INSN_P (insn
))
5252 /* Basically ignore anything that is not a simple SET. We do have
5253 to make sure to invalidate nonnull_local and set nonnull_killed
5254 for such insns though. */
5255 set
= single_set (insn
);
5258 note_stores (PATTERN (insn
), invalidate_nonnull_info
, npi
);
5262 /* See if we've got a usable memory load. We handle it first
5263 in case it uses its address register as a dest (which kills
5264 the nonnull property). */
5265 if (GET_CODE (SET_SRC (set
)) == MEM
5266 && GET_CODE ((reg
= XEXP (SET_SRC (set
), 0))) == REG
5267 && REGNO (reg
) >= npi
->min_reg
5268 && REGNO (reg
) < npi
->max_reg
)
5269 SET_BIT (nonnull_local
[current_block
],
5270 REGNO (reg
) - npi
->min_reg
);
5272 /* Now invalidate stuff clobbered by this insn. */
5273 note_stores (PATTERN (insn
), invalidate_nonnull_info
, npi
);
5275 /* And handle stores, we do these last since any sets in INSN can
5276 not kill the nonnull property if it is derived from a MEM
5277 appearing in a SET_DEST. */
5278 if (GET_CODE (SET_DEST (set
)) == MEM
5279 && GET_CODE ((reg
= XEXP (SET_DEST (set
), 0))) == REG
5280 && REGNO (reg
) >= npi
->min_reg
5281 && REGNO (reg
) < npi
->max_reg
)
5282 SET_BIT (nonnull_local
[current_block
],
5283 REGNO (reg
) - npi
->min_reg
);
  /* Now compute global properties based on the local properties.  This
     is a classic global availability algorithm.  */
5289 compute_available (nonnull_local
, nonnull_killed
,
5290 nonnull_avout
, nonnull_avin
);
5292 /* Now look at each bb and see if it ends with a compare of a value
5294 for (bb
= 0; bb
< n_basic_blocks
; bb
++)
5296 rtx last_insn
= BLOCK_END (bb
);
5297 rtx condition
, earliest
;
5298 int compare_and_branch
;
5300 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5301 since BLOCK_REG[BB] is zero if this block did not end with a
5302 comparison against zero, this condition works. */
5303 if (block_reg
[bb
] < npi
->min_reg
5304 || block_reg
[bb
] >= npi
->max_reg
)
5307 /* LAST_INSN is a conditional jump. Get its condition. */
5308 condition
= get_condition (last_insn
, &earliest
);
5310 /* If we can't determine the condition then skip. */
5314 /* Is the register known to have a nonzero value? */
5315 if (!TEST_BIT (nonnull_avout
[bb
], block_reg
[bb
] - npi
->min_reg
))
5318 /* Try to compute whether the compare/branch at the loop end is one or
5319 two instructions. */
5320 if (earliest
== last_insn
)
5321 compare_and_branch
= 1;
5322 else if (earliest
== prev_nonnote_insn (last_insn
))
5323 compare_and_branch
= 2;
5327 /* We know the register in this comparison is nonnull at exit from
5328 this block. We can optimize this comparison. */
5329 if (GET_CODE (condition
) == NE
)
5333 new_jump
= emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn
)),
5335 JUMP_LABEL (new_jump
) = JUMP_LABEL (last_insn
);
5336 LABEL_NUSES (JUMP_LABEL (new_jump
))++;
5337 emit_barrier_after (new_jump
);
5340 delete_insn (last_insn
);
5341 if (compare_and_branch
== 2)
5342 delete_insn (earliest
);
5343 purge_dead_edges (BASIC_BLOCK (bb
));
5345 /* Don't check this block again. (Note that BLOCK_END is
5346 invalid here; we deleted the last instruction in the
/* Find EQ/NE comparisons against zero which can be (indirectly) evaluated

   This is conceptually similar to global constant/copy propagation and
   classic global CSE (it even uses the same dataflow equations as cprop).

   If a register is used as memory address with the form (mem (reg)), then we
   know that REG can not be zero at that point in the program.  Any instruction
   which sets REG "kills" this property.

   So, if every path leading to a conditional branch has an available memory
   reference of that form, then we know the register can not have the value
   zero at the conditional branch.

   So we merely need to compute the local properties and propagate that data
   around the cfg, then optimize where possible.

   We run this pass two times.  Once before CSE, then again after CSE.  This
   has proven to be the most profitable approach.  It is rare for new
   optimization opportunities of this nature to appear after the first CSE
   pass.

   This could probably be integrated with global cprop with a little work.  */
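/* A minimal sketch of the kind of source this helps (hypothetical code, not
   part of the compiler):

	int first (int *p)
	{
	  int x = *p;	.. the load (mem (reg P)) proves P is non-null here
	  if (p == 0)	.. so this test can never be true on this path
	    abort ();
	  return x;
	}

   The dereference makes P available as "known non-null", so the later
   EQ-against-zero branch can be folded away.  */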
5377 delete_null_pointer_checks (f
)
5378 rtx f ATTRIBUTE_UNUSED
;
5380 sbitmap
*nonnull_avin
, *nonnull_avout
;
5381 unsigned int *block_reg
;
5386 struct null_pointer_info npi
;
5388 /* If we have only a single block, then there's nothing to do. */
5389 if (n_basic_blocks
<= 1)
5392 /* Trying to perform global optimizations on flow graphs which have
5393 a high connectivity will take a long time and is unlikely to be
5394 particularly useful.
5396 In normal circumstances a cfg should have about twice as many edges
5397 as blocks. But we do not want to punish small functions which have
5398 a couple switch statements. So we require a relatively large number
5399 of basic blocks and the ratio of edges to blocks to be high. */
5400 if (n_basic_blocks
> 1000 && n_edges
/ n_basic_blocks
>= 20)
5403 /* We need four bitmaps, each with a bit for each register in each
5405 max_reg
= max_reg_num ();
5406 regs_per_pass
= get_bitmap_width (4, n_basic_blocks
, max_reg
);
5408 /* Allocate bitmaps to hold local and global properties. */
5409 npi
.nonnull_local
= sbitmap_vector_alloc (n_basic_blocks
, regs_per_pass
);
5410 npi
.nonnull_killed
= sbitmap_vector_alloc (n_basic_blocks
, regs_per_pass
);
5411 nonnull_avin
= sbitmap_vector_alloc (n_basic_blocks
, regs_per_pass
);
5412 nonnull_avout
= sbitmap_vector_alloc (n_basic_blocks
, regs_per_pass
);
5414 /* Go through the basic blocks, seeing whether or not each block
5415 ends with a conditional branch whose condition is a comparison
5416 against zero. Record the register compared in BLOCK_REG. */
5417 block_reg
= (unsigned int *) xcalloc (n_basic_blocks
, sizeof (int));
5418 for (bb
= 0; bb
< n_basic_blocks
; bb
++)
5420 rtx last_insn
= BLOCK_END (bb
);
5421 rtx condition
, earliest
, reg
;
5423 /* We only want conditional branches. */
5424 if (GET_CODE (last_insn
) != JUMP_INSN
5425 || !any_condjump_p (last_insn
)
5426 || !onlyjump_p (last_insn
))
5429 /* LAST_INSN is a conditional jump. Get its condition. */
5430 condition
= get_condition (last_insn
, &earliest
);
5432 /* If we were unable to get the condition, or it is not an equality
5433 comparison against zero then there's nothing we can do. */
5435 || (GET_CODE (condition
) != NE
&& GET_CODE (condition
) != EQ
)
5436 || GET_CODE (XEXP (condition
, 1)) != CONST_INT
5437 || (XEXP (condition
, 1)
5438 != CONST0_RTX (GET_MODE (XEXP (condition
, 0)))))
5441 /* We must be checking a register against zero. */
5442 reg
= XEXP (condition
, 0);
5443 if (GET_CODE (reg
) != REG
)
5446 block_reg
[bb
] = REGNO (reg
);
5449 /* Go through the algorithm for each block of registers. */
5450 for (reg
= FIRST_PSEUDO_REGISTER
; reg
< max_reg
; reg
+= regs_per_pass
)
5453 npi
.max_reg
= MIN (reg
+ regs_per_pass
, max_reg
);
5454 delete_null_pointer_checks_1 (block_reg
, nonnull_avin
,
5455 nonnull_avout
, &npi
);
5458 /* Free the table of registers compared at the end of every block. */
5462 sbitmap_vector_free (npi
.nonnull_local
);
5463 sbitmap_vector_free (npi
.nonnull_killed
);
5464 sbitmap_vector_free (nonnull_avin
);
5465 sbitmap_vector_free (nonnull_avout
);
5468 /* Code Hoisting variables and subroutines. */
5470 /* Very busy expressions. */
5471 static sbitmap
*hoist_vbein
;
5472 static sbitmap
*hoist_vbeout
;
5474 /* Hoistable expressions. */
5475 static sbitmap
*hoist_exprs
;
5477 /* Dominator bitmaps. */
5478 static sbitmap
*dominators
;
/* ??? We could compute post dominators and run this algorithm in
   reverse to perform tail merging; doing so would probably be
   more effective than the tail merging code in jump.c.

   It's unclear if tail merging could be run in parallel with
   code hoisting.  It would be nice.  */
5487 /* Allocate vars used for code hoisting analysis. */
5490 alloc_code_hoist_mem (n_blocks
, n_exprs
)
5491 int n_blocks
, n_exprs
;
5493 antloc
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5494 transp
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5495 comp
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5497 hoist_vbein
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5498 hoist_vbeout
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5499 hoist_exprs
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5500 transpout
= sbitmap_vector_alloc (n_blocks
, n_exprs
);
5502 dominators
= sbitmap_vector_alloc (n_blocks
, n_blocks
);
5505 /* Free vars used for code hoisting analysis. */
5508 free_code_hoist_mem ()
5510 sbitmap_vector_free (antloc
);
5511 sbitmap_vector_free (transp
);
5512 sbitmap_vector_free (comp
);
5514 sbitmap_vector_free (hoist_vbein
);
5515 sbitmap_vector_free (hoist_vbeout
);
5516 sbitmap_vector_free (hoist_exprs
);
5517 sbitmap_vector_free (transpout
);
5519 sbitmap_vector_free (dominators
);
5522 /* Compute the very busy expressions at entry/exit from each block.
5524 An expression is very busy if all paths from a given point
5525 compute the expression. */
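/* Restating the update loop below as dataflow equations (notation only; the
   bit-vector operations are the ones actually used):

     VBEIN(b)  = ANTLOC(b) | (VBEOUT(b) & TRANSP(b))
     VBEOUT(b) = intersection over all successors s of b of VBEIN(s)
		 (empty for the last block)

   iterated until no bits change.  */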
5528 compute_code_hoist_vbeinout ()
5530 int bb
, changed
, passes
;
5532 sbitmap_vector_zero (hoist_vbeout
, n_basic_blocks
);
5533 sbitmap_vector_zero (hoist_vbein
, n_basic_blocks
);
      /* We scan the blocks in the reverse order to speed up
	 the convergence.  */
      for (bb = n_basic_blocks - 1; bb >= 0; bb--)
5546 changed
|= sbitmap_a_or_b_and_c (hoist_vbein
[bb
], antloc
[bb
],
5547 hoist_vbeout
[bb
], transp
[bb
]);
5548 if (bb
!= n_basic_blocks
- 1)
5549 sbitmap_intersection_of_succs (hoist_vbeout
[bb
], hoist_vbein
, bb
);
5556 fprintf (gcse_file
, "hoisting vbeinout computation: %d passes\n", passes
);
5559 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5562 compute_code_hoist_data ()
5564 compute_local_properties (transp
, comp
, antloc
, 0);
5565 compute_transpout ();
5566 compute_code_hoist_vbeinout ();
5567 calculate_dominance_info (NULL
, dominators
, CDI_DOMINATORS
);
5569 fprintf (gcse_file
, "\n");
/* Determine if the expression identified by EXPR_INDEX would
   reach BB unimpaired if it was placed at the end of EXPR_BB.

   It's unclear exactly what Muchnick meant by "unimpaired".  It seems
   to me that the expression must either be computed or transparent in
   *every* block in the path(s) from EXPR_BB to BB.  Any other definition
   would allow the expression to be hoisted out of loops, even if
   the expression wasn't a loop invariant.

   Contrast this to reachability for PRE where an expression is
   considered reachable if *any* path reaches instead of *all*
   paths.  */
, expr_index
, bb
, visited
)
5587 basic_block expr_bb
;
5593 int visited_allocated_locally
= 0;
5596 if (visited
== NULL
)
5598 visited_allocated_locally
= 1;
5599 visited
= xcalloc (n_basic_blocks
, 1);
5602 for (pred
= bb
->pred
; pred
!= NULL
; pred
= pred
->pred_next
)
5604 basic_block pred_bb
= pred
->src
;
5606 if (pred
->src
== ENTRY_BLOCK_PTR
)
5608 else if (visited
[pred_bb
->index
])
5611 /* Does this predecessor generate this expression? */
5612 else if (TEST_BIT (comp
[pred_bb
->index
], expr_index
))
5614 else if (! TEST_BIT (transp
[pred_bb
->index
], expr_index
))
5620 visited
[pred_bb
->index
] = 1;
5621 if (! hoist_expr_reaches_here_p (expr_bb
, expr_index
,
5626 if (visited_allocated_locally
)
5629 return (pred
== NULL
);
5632 /* Actually perform code hoisting. */
5639 struct expr
**index_map
;
5642 sbitmap_vector_zero (hoist_exprs
, n_basic_blocks
);
5644 /* Compute a mapping from expression number (`bitmap_index') to
5645 hash table entry. */
5647 index_map
= (struct expr
**) xcalloc (n_exprs
, sizeof (struct expr
*));
5648 for (i
= 0; i
< expr_hash_table_size
; i
++)
5649 for (expr
= expr_hash_table
[i
]; expr
!= NULL
; expr
= expr
->next_same_hash
)
5650 index_map
[expr
->bitmap_index
] = expr
;
5652 /* Walk over each basic block looking for potentially hoistable
5653 expressions, nothing gets hoisted from the entry block. */
5654 for (bb
= 0; bb
< n_basic_blocks
; bb
++)
5657 int insn_inserted_p
;
5659 /* Examine each expression that is very busy at the exit of this
5660 block. These are the potentially hoistable expressions. */
5661 for (i
= 0; i
< hoist_vbeout
[bb
]->n_bits
; i
++)
5665 if (TEST_BIT (hoist_vbeout
[bb
], i
) && TEST_BIT (transpout
[bb
], i
))
5667 /* We've found a potentially hoistable expression, now
5668 we look at every block BB dominates to see if it
5669 computes the expression. */
5670 for (dominated
= 0; dominated
< n_basic_blocks
; dominated
++)
5672 /* Ignore self dominance. */
5674 || ! TEST_BIT (dominators
[dominated
], bb
))
5677 /* We've found a dominated block, now see if it computes
5678 the busy expression and whether or not moving that
5679 expression to the "beginning" of that block is safe. */
5680 if (!TEST_BIT (antloc
[dominated
], i
))
	      /* Note if the expression would reach the dominated block
		 unimpaired if it was placed at the end of BB.

		 Keep track of how many times this expression is hoistable
		 from a dominated block into BB.  */
5688 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb
), i
,
5689 BASIC_BLOCK (dominated
), NULL
))
	  /* If we found more than one hoistable occurrence of this
	     expression, then note it in the bitmap of expressions to
	     hoist.  It makes no sense to hoist things which are computed
	     in only one BB, and doing so tends to pessimize register
	     allocation.  One could increase this value to try harder
	     to avoid any possible code expansion due to register
	     allocation issues; however, experiments have shown that
	     the vast majority of hoistable expressions are only movable
	     from two successors, so raising this threshold is likely
	     to nullify any benefit we get from code hoisting.  */
5705 SET_BIT (hoist_exprs
[bb
], i
);
5711 /* If we found nothing to hoist, then quit now. */
5715 /* Loop over all the hoistable expressions. */
5716 for (i
= 0; i
< hoist_exprs
[bb
]->n_bits
; i
++)
5718 /* We want to insert the expression into BB only once, so
5719 note when we've inserted it. */
5720 insn_inserted_p
= 0;
5722 /* These tests should be the same as the tests above. */
5723 if (TEST_BIT (hoist_vbeout
[bb
], i
))
5725 /* We've found a potentially hoistable expression, now
5726 we look at every block BB dominates to see if it
5727 computes the expression. */
5728 for (dominated
= 0; dominated
< n_basic_blocks
; dominated
++)
5730 /* Ignore self dominance. */
5732 || ! TEST_BIT (dominators
[dominated
], bb
))
5735 /* We've found a dominated block, now see if it computes
5736 the busy expression and whether or not moving that
5737 expression to the "beginning" of that block is safe. */
5738 if (!TEST_BIT (antloc
[dominated
], i
))
5741 /* The expression is computed in the dominated block and
5742 it would be safe to compute it at the start of the
5743 dominated block. Now we have to determine if the
5744 expression would reach the dominated block if it was
5745 placed at the end of BB. */
5746 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb
), i
,
5747 BASIC_BLOCK (dominated
), NULL
))
5749 struct expr
*expr
= index_map
[i
];
5750 struct occr
*occr
= expr
->antic_occr
;
		      /* Find the right occurrence of this expression.  */
		      while (occr && BLOCK_NUM (occr->insn) != dominated)
			occr = occr->next;

		      /* Should never happen.  */
5764 set
= single_set (insn
);
5768 /* Create a pseudo-reg to store the result of reaching
5769 expressions into. Get the mode for the new pseudo
5770 from the mode of the original destination pseudo. */
5771 if (expr
->reaching_reg
== NULL
)
5773 = gen_reg_rtx (GET_MODE (SET_DEST (set
)));
5775 /* In theory this should never fail since we're creating
5778 However, on the x86 some of the movXX patterns
5779 actually contain clobbers of scratch regs. This may
5780 cause the insn created by validate_change to not
5781 match any pattern and thus cause validate_change to
5783 if (validate_change (insn
, &SET_SRC (set
),
5784 expr
->reaching_reg
, 0))
5786 occr
->deleted_p
= 1;
5787 if (!insn_inserted_p
)
5789 insert_insn_end_bb (index_map
[i
],
5790 BASIC_BLOCK (bb
), 0);
5791 insn_inserted_p
= 1;
/* Top level routine to perform one code hoisting (aka unification) pass.

   Return non-zero if a change was made.  */

one_code_hoisting_pass ()
  alloc_expr_hash_table (max_cuid);
  compute_expr_hash_table ();

  dump_hash_table (gcse_file, "Code Hoisting Expressions", expr_hash_table,
		   expr_hash_table_size, n_exprs);
5820 alloc_code_hoist_mem (n_basic_blocks
, n_exprs
);
5821 compute_code_hoist_data ();
5823 free_code_hoist_mem ();
5826 free_expr_hash_table ();
/* Here we provide the things required to do store motion towards
   the exit.  In order for this to be effective, gcse also needed to
   be taught how to move a load when it is killed only by a store to itself.

	    void foo(float scale)
	    {
	      for (i=0; i<10; i++)
		a[i] *= scale;
	    }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

   The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge based lcm, recognizes
   this situation and allows gcse to move the load out of the loop.

   Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
5856 /* This will search the ldst list for a matching expression. If it
5857 doesn't find one, we create one and initialize it. */
5859 static struct ls_expr
*
5863 struct ls_expr
* ptr
;
5865 for (ptr
= first_ls_expr(); ptr
!= NULL
; ptr
= next_ls_expr (ptr
))
5866 if (expr_equiv_p (ptr
->pattern
, x
))
5871 ptr
= (struct ls_expr
*) xmalloc (sizeof (struct ls_expr
));
5873 ptr
->next
= pre_ldst_mems
;
5876 ptr
->loads
= NULL_RTX
;
5877 ptr
->stores
= NULL_RTX
;
5878 ptr
->reaching_reg
= NULL_RTX
;
5881 ptr
->hash_index
= 0;
5882 pre_ldst_mems
= ptr
;
5888 /* Free up an individual ldst entry. */
5891 free_ldst_entry (ptr
)
5892 struct ls_expr
* ptr
;
5894 free_INSN_LIST_list (& ptr
->loads
);
5895 free_INSN_LIST_list (& ptr
->stores
);
5900 /* Free up all memory associated with the ldst list. */
5905 while (pre_ldst_mems
)
5907 struct ls_expr
* tmp
= pre_ldst_mems
;
5909 pre_ldst_mems
= pre_ldst_mems
->next
;
5911 free_ldst_entry (tmp
);
5914 pre_ldst_mems
= NULL
;
5917 /* Dump debugging info about the ldst list. */
5920 print_ldst_list (file
)
5923 struct ls_expr
* ptr
;
5925 fprintf (file
, "LDST list: \n");
5927 for (ptr
= first_ls_expr(); ptr
!= NULL
; ptr
= next_ls_expr (ptr
))
5929 fprintf (file
, " Pattern (%3d): ", ptr
->index
);
5931 print_rtl (file
, ptr
->pattern
);
5933 fprintf (file
, "\n Loads : ");
5936 print_rtl (file
, ptr
->loads
);
5938 fprintf (file
, "(nil)");
5940 fprintf (file
, "\n Stores : ");
5943 print_rtl (file
, ptr
->stores
);
5945 fprintf (file
, "(nil)");
5947 fprintf (file
, "\n\n");
5950 fprintf (file
, "\n");
5953 /* Returns 1 if X is in the list of ldst only expressions. */
5955 static struct ls_expr
*
5956 find_rtx_in_ldst (x
)
5959 struct ls_expr
* ptr
;
5961 for (ptr
= pre_ldst_mems
; ptr
!= NULL
; ptr
= ptr
->next
)
5962 if (expr_equiv_p (ptr
->pattern
, x
) && ! ptr
->invalid
)
5968 /* Assign each element of the list of mems a monotonically increasing value. */
5973 struct ls_expr
* ptr
;
5976 for (ptr
= pre_ldst_mems
; ptr
!= NULL
; ptr
= ptr
->next
)
5982 /* Return first item in the list. */
5984 static inline struct ls_expr
*
5987 return pre_ldst_mems
;
/* Return the next item in the list after the specified one.  */
5992 static inline struct ls_expr
*
5994 struct ls_expr
* ptr
;
5999 /* Load Motion for loads which only kill themselves. */
/* Return true if x is a simple MEM operation, with no registers or
   side effects.  These are the types of loads we consider for the
   ld_motion list; otherwise we let the usual aliasing take care of it.  */
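/* Illustrative examples (not an exhaustive specification): a fixed-address
   load such as (mem (symbol_ref "x")) qualifies, while a volatile MEM, a
   BLKmode MEM, or a MEM whose address can vary, e.g. (mem (reg 42)), is
   rejected and left to the normal aliasing machinery.  */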
6009 if (GET_CODE (x
) != MEM
)
6012 if (MEM_VOLATILE_P (x
))
6015 if (GET_MODE (x
) == BLKmode
)
6018 if (!rtx_varies_p (XEXP (x
, 0), 0))
/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias itself.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */

invalidate_any_buried_refs (x)
6038 struct ls_expr
* ptr
;
6040 /* Invalidate it in the list. */
6041 if (GET_CODE (x
) == MEM
&& simple_mem (x
))
6043 ptr
= ldst_entry (x
);
6047 /* Recursively process the insn. */
6048 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
6050 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
6053 invalidate_any_buried_refs (XEXP (x
, i
));
6054 else if (fmt
[i
] == 'E')
6055 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
6056 invalidate_any_buried_refs (XVECEXP (x
, i
, j
));
6060 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6061 being defined as MEM loads and stores to symbols, with no
6062 side effects and no registers in the expression. If there are any
6063 uses/defs which don't match this criteria, it is invalidated and
6064 trimmed out later. */
6067 compute_ld_motion_mems ()
6069 struct ls_expr
* ptr
;
6073 pre_ldst_mems
= NULL
;
6075 for (bb
= 0; bb
< n_basic_blocks
; bb
++)
6077 for (insn
= BLOCK_HEAD (bb
);
6078 insn
&& insn
!= NEXT_INSN (BLOCK_END (bb
));
6079 insn
= NEXT_INSN (insn
))
6081 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
6083 if (GET_CODE (PATTERN (insn
)) == SET
)
6085 rtx src
= SET_SRC (PATTERN (insn
));
6086 rtx dest
= SET_DEST (PATTERN (insn
));
6088 /* Check for a simple LOAD... */
6089 if (GET_CODE (src
) == MEM
&& simple_mem (src
))
6091 ptr
= ldst_entry (src
);
6092 if (GET_CODE (dest
) == REG
)
6093 ptr
->loads
= alloc_INSN_LIST (insn
, ptr
->loads
);
6099 /* Make sure there isn't a buried load somewhere. */
6100 invalidate_any_buried_refs (src
);
6103 /* Check for stores. Don't worry about aliased ones, they
6104 will block any movement we might do later. We only care
6105 about this exact pattern since those are the only
6106 circumstance that we will ignore the aliasing info. */
6107 if (GET_CODE (dest
) == MEM
&& simple_mem (dest
))
6109 ptr
= ldst_entry (dest
);
6111 if (GET_CODE (src
) != MEM
6112 && GET_CODE (src
) != ASM_OPERANDS
)
6113 ptr
->stores
= alloc_INSN_LIST (insn
, ptr
->stores
);
6119 invalidate_any_buried_refs (PATTERN (insn
));
6125 /* Remove any references that have been either invalidated or are not in the
6126 expression list for pre gcse. */
6129 trim_ld_motion_mems ()
6131 struct ls_expr
* last
= NULL
;
6132 struct ls_expr
* ptr
= first_ls_expr ();
6136 int del
= ptr
->invalid
;
6137 struct expr
* expr
= NULL
;
6139 /* Delete if entry has been made invalid. */
6145 /* Delete if we cannot find this mem in the expression list. */
6146 for (i
= 0; i
< expr_hash_table_size
&& del
; i
++)
6148 for (expr
= expr_hash_table
[i
];
6150 expr
= expr
->next_same_hash
)
6151 if (expr_equiv_p (expr
->expr
, ptr
->pattern
))
6163 last
->next
= ptr
->next
;
6164 free_ldst_entry (ptr
);
6169 pre_ldst_mems
= pre_ldst_mems
->next
;
6170 free_ldst_entry (ptr
);
6171 ptr
= pre_ldst_mems
;
6176 /* Set the expression field if we are keeping it. */
6183 /* Show the world what we've found. */
6184 if (gcse_file
&& pre_ldst_mems
!= NULL
)
6185 print_ldst_list (gcse_file
);
/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */
6196 update_ld_motion_stores (expr
)
6199 struct ls_expr
* mem_ptr
;
6201 if ((mem_ptr
= find_rtx_in_ldst (expr
->expr
)))
      /* We can try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */
6207 /* We replace SET mem = expr with
6209 SET mem = reg , where reg is the
6210 reaching reg used in the load. */
6211 rtx list
= mem_ptr
->stores
;
6213 for ( ; list
!= NULL_RTX
; list
= XEXP (list
, 1))
6215 rtx insn
= XEXP (list
, 0);
6216 rtx pat
= PATTERN (insn
);
6217 rtx src
= SET_SRC (pat
);
6218 rtx reg
= expr
->reaching_reg
;
6221 /* If we've already copied it, continue. */
6222 if (expr
->reaching_reg
== src
)
6227 fprintf (gcse_file
, "PRE: store updated with reaching reg ");
6228 print_rtl (gcse_file
, expr
->reaching_reg
);
6229 fprintf (gcse_file
, ":\n ");
6230 print_inline_rtx (gcse_file
, insn
, 8);
6231 fprintf (gcse_file
, "\n");
6234 copy
= gen_move_insn ( reg
, SET_SRC (pat
));
6235 new = emit_insn_before (copy
, insn
);
6236 record_one_set (REGNO (reg
), new);
6237 SET_SRC (pat
) = reg
;
6239 /* un-recognize this pattern since it's probably different now. */
6240 INSN_CODE (insn
) = -1;
6241 gcse_create_count
++;
6246 /* Store motion code. */
6248 /* This is used to communicate the target bitvector we want to use in the
6249 reg_set_info routine when called via the note_stores mechanism. */
6250 static sbitmap
* regvec
;
6252 /* Used in computing the reverse edge graph bit vectors. */
6253 static sbitmap
* st_antloc
;
6255 /* Global holding the number of store expressions we are dealing with. */
6256 static int num_stores
;
/* Check whether we need to record a register as set.  Called from note_stores.  */
6261 reg_set_info (dest
, setter
, data
)
6262 rtx dest
, setter ATTRIBUTE_UNUSED
;
6263 void * data ATTRIBUTE_UNUSED
;
6265 if (GET_CODE (dest
) == SUBREG
)
6266 dest
= SUBREG_REG (dest
);
6268 if (GET_CODE (dest
) == REG
)
6269 SET_BIT (*regvec
, REGNO (dest
));
6272 /* Return non-zero if the register operands of expression X are killed
6273 anywhere in basic block BB. */
6276 store_ops_ok (x
, bb
)
6284 /* Repeat is used to turn tail-recursion into iteration. */
6290 code
= GET_CODE (x
);
6294 /* If a reg has changed after us in this
6295 block, the operand has been killed. */
6296 return TEST_BIT (reg_set_in_block
[bb
->index
], REGNO (x
));
6324 i
= GET_RTX_LENGTH (code
) - 1;
6325 fmt
= GET_RTX_FORMAT (code
);
6331 rtx tem
= XEXP (x
, i
);
6333 /* If we are about to do the last recursive call
6334 needed at this level, change it into iteration.
6335 This function is called enough to be worth it. */
6342 if (! store_ops_ok (tem
, bb
))
6345 else if (fmt
[i
] == 'E')
6349 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
6351 if (! store_ops_ok (XVECEXP (x
, i
, j
), bb
))
/* Determine whether INSN is a MEM store pattern that we will consider moving.  */
6363 find_moveable_store (insn
)
6366 struct ls_expr
* ptr
;
6367 rtx dest
= PATTERN (insn
);
6369 if (GET_CODE (dest
) != SET
6370 || GET_CODE (SET_SRC (dest
)) == ASM_OPERANDS
)
6373 dest
= SET_DEST (dest
);
6375 if (GET_CODE (dest
) != MEM
|| MEM_VOLATILE_P (dest
)
6376 || GET_MODE (dest
) == BLKmode
)
6379 if (GET_CODE (XEXP (dest
, 0)) != SYMBOL_REF
)
6382 if (rtx_varies_p (XEXP (dest
, 0), 0))
6385 ptr
= ldst_entry (dest
);
6386 ptr
->stores
= alloc_INSN_LIST (insn
, ptr
->stores
);
6389 /* Perform store motion. Much like gcse, except we move expressions the
6390 other way by looking at the flowgraph in reverse. */
6393 compute_store_table ()
6399 max_gcse_regno
= max_reg_num ();
6401 reg_set_in_block
= (sbitmap
*) sbitmap_vector_alloc (n_basic_blocks
,
6403 sbitmap_vector_zero (reg_set_in_block
, n_basic_blocks
);
6406 /* Find all the stores we care about. */
6407 for (bb
= 0; bb
< n_basic_blocks
; bb
++)
6409 regvec
= & (reg_set_in_block
[bb
]);
6410 for (insn
= BLOCK_END (bb
);
6411 insn
&& insn
!= PREV_INSN (BLOCK_HEAD (bb
));
6412 insn
= PREV_INSN (insn
))
6414 /* Ignore anything that is not a normal insn. */
6415 if (! INSN_P (insn
))
6418 if (GET_CODE (insn
) == CALL_INSN
)
6420 bool clobbers_all
= false;
6421 #ifdef NON_SAVING_SETJMP
6422 if (NON_SAVING_SETJMP
6423 && find_reg_note (insn
, REG_SETJMP
, NULL_RTX
))
6424 clobbers_all
= true;
6427 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
6429 || TEST_HARD_REG_BIT (regs_invalidated_by_call
, regno
))
6430 SET_BIT (reg_set_in_block
[bb
], regno
);
6433 pat
= PATTERN (insn
);
6434 note_stores (pat
, reg_set_info
, NULL
);
6436 /* Now that we've marked regs, look for stores. */
6437 if (GET_CODE (pat
) == SET
)
6438 find_moveable_store (insn
);
6442 ret
= enumerate_ldsts ();
6446 fprintf (gcse_file
, "Store Motion Expressions.\n");
6447 print_ldst_list (gcse_file
);
6453 /* Check to see if the load X is aliased with STORE_PATTERN. */
6456 load_kills_store (x
, store_pattern
)
6457 rtx x
, store_pattern
;
6459 if (true_dependence (x
, GET_MODE (x
), store_pattern
, rtx_addr_varies_p
))
6464 /* Go through the entire insn X, looking for any loads which might alias
6465 STORE_PATTERN. Return 1 if found. */
6468 find_loads (x
, store_pattern
)
6469 rtx x
, store_pattern
;
6478 if (GET_CODE (x
) == SET
)
6481 if (GET_CODE (x
) == MEM
)
6483 if (load_kills_store (x
, store_pattern
))
6487 /* Recursively process the insn. */
6488 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
6490 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0 && !ret
; i
--)
6493 ret
|= find_loads (XEXP (x
, i
), store_pattern
);
6494 else if (fmt
[i
] == 'E')
6495 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
6496 ret
|= find_loads (XVECEXP (x
, i
, j
), store_pattern
);
/* Check if INSN kills the store pattern X (is aliased with it).
   Return 1 if it does.  */
6505 store_killed_in_insn (x
, insn
)
6508 if (GET_RTX_CLASS (GET_CODE (insn
)) != 'i')
6511 if (GET_CODE (insn
) == CALL_INSN
)
6513 /* A normal or pure call might read from pattern,
6514 but a const call will not. */
6515 if (CONST_OR_PURE_CALL_P (insn
))
6519 for (link
= CALL_INSN_FUNCTION_USAGE (insn
);
6521 link
= XEXP (link
, 1))
6522 if (GET_CODE (XEXP (link
, 0)) == USE
6523 && GET_CODE (XEXP (XEXP (link
, 0), 0)) == MEM
6524 && GET_CODE (XEXP (XEXP (XEXP (link
, 0), 0), 0)) == SCRATCH
)
6532 if (GET_CODE (PATTERN (insn
)) == SET
)
6534 rtx pat
= PATTERN (insn
);
6535 /* Check for memory stores to aliased objects. */
6536 if (GET_CODE (SET_DEST (pat
)) == MEM
&& !expr_equiv_p (SET_DEST (pat
), x
))
6537 /* pretend its a load and check for aliasing. */
6538 if (find_loads (SET_DEST (pat
), x
))
6540 return find_loads (SET_SRC (pat
), x
);
6543 return find_loads (PATTERN (insn
), x
);
6546 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
6547 within basic block BB. */
6550 store_killed_after (x
, insn
, bb
)
  /* Check if the register operands of the store are OK in this block.
     Note that if registers are changed ANYWHERE in the block, we'll
     decide we can't move it, regardless of whether it changed above
     or below the store.  This could be improved by checking the register
     operands while looking for aliasing in each insn.  */
6564 if (!store_ops_ok (XEXP (x
, 0), bb
))
6567 for ( ; insn
&& insn
!= NEXT_INSN (last
); insn
= NEXT_INSN (insn
))
6568 if (store_killed_in_insn (x
, insn
))
6574 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
6575 within basic block BB. */
6577 store_killed_before (x
, insn
, bb
)
6581 rtx first
= bb
->head
;
6584 return store_killed_in_insn (x
, insn
);
  /* Check if the register operands of the store are OK in this block.
     Note that if registers are changed ANYWHERE in the block, we'll
     decide we can't move it, regardless of whether it changed above
     or below the store.  This could be improved by checking the register
     operands while looking for aliasing in each insn.  */
6591 if (!store_ops_ok (XEXP (x
, 0), bb
))
6594 for ( ; insn
&& insn
!= PREV_INSN (first
); insn
= PREV_INSN (insn
))
6595 if (store_killed_in_insn (x
, insn
))
#define ANTIC_STORE_LIST(x) ((x)->loads)
#define AVAIL_STORE_LIST(x) ((x)->stores)
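
/* For instance, for the expression MEM[a] in a block containing

       1:  MEM[a] = r1     killed afterwards by the load in insn 2
       2:  r2 = MEM[a]
       3:  MEM[a] = r3     nothing kills it afterwards

   insn 1 ends up on ANTIC_STORE_LIST (nothing kills MEM[a] before it)
   and insn 3 on AVAIL_STORE_LIST (it survives to the end of the block).
   This is schematic; the precise tests are store_killed_before and
   store_killed_after above.  */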
/* Given the table of available store insns at the end of blocks,
   determine which ones are not killed by aliasing, and generate
   the appropriate vectors for gen and killed.  */

static void
build_store_vectors ()
{
  basic_block bb;
  int b;
  rtx insn, st;
  struct ls_expr * ptr;

  /* Build the gen_vector.  This is any store in the table which is not killed
     by aliasing later in its block.  */
  ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (ae_gen, n_basic_blocks);

  st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (st_antloc, n_basic_blocks);

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      /* Put all the stores into either the antic list, or the avail list,
         or both.  */
      rtx store_list = ptr->stores;
      ptr->stores = NULL_RTX;

      for (st = store_list; st != NULL; st = XEXP (st, 1))
        {
          insn = XEXP (st, 0);
          bb = BLOCK_FOR_INSN (insn);

          if (!store_killed_after (ptr->pattern, insn, bb))
            {
              /* If we've already seen an available expression in this block,
                 we can delete the one we saw already (it occurs earlier in
                 the block) and replace it with this one.  We'll copy the
                 old SRC expression to an unused register in case there
                 are any side effects.  */
              if (TEST_BIT (ae_gen[bb->index], ptr->index))
                {
                  /* Find previous store.  */
                  rtx st;
                  for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
                    if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
                      break;
                  if (st)
                    {
                      rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
                      if (gcse_file)
                        fprintf (gcse_file, "Removing redundant store:\n");
                      replace_store_insn (r, XEXP (st, 0), bb);
                      XEXP (st, 0) = insn;
                      continue;
                    }
                }
              SET_BIT (ae_gen[bb->index], ptr->index);
              AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
                                                        AVAIL_STORE_LIST (ptr));
            }

          if (!store_killed_before (ptr->pattern, insn, bb))
            {
              SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
              ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
                                                        ANTIC_STORE_LIST (ptr));
            }
        }

      /* Free the original list of store insns.  */
      free_INSN_LIST_list (&store_list);
    }

  ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (ae_kill, n_basic_blocks);

  transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (transp, n_basic_blocks);

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    for (b = 0; b < n_basic_blocks; b++)
      {
        if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
          {
            /* The anticipatable expression is not killed if it's gen'd.
               We leave this check out for now.  If we have a code sequence
               in a block which looks like:
                    ST MEMa = x
                    L     y = MEMa
                    ST MEMa = z
               We should flag this as having an ANTIC expression, NOT
               transparent, NOT killed, and AVAIL.
               Unfortunately, since we haven't re-written all loads to
               use the reaching reg, we'll end up doing an incorrect
               load in the middle here if we push the store down.  It happens in
               gcc.c-torture/execute/960311-1.c with -O3.
               If we always kill it in this case, we'll sometimes do
               unnecessary work, but it shouldn't actually hurt anything.
               if (!TEST_BIT (ae_gen[b], ptr->index)).  */
            SET_BIT (ae_kill[b], ptr->index);
          }
        else
          SET_BIT (transp[b], ptr->index);
      }

  /* Any block with no exits calls some non-returning function, so
     we better mark the store killed here, or we might not store to
     it at all.  If we knew it was abort, we wouldn't have to store,
     but we don't know that for sure.  */
  if (gcse_file)
    {
      fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
      print_ldst_list (gcse_file);
      dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
      dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
      dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
      dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
    }
}
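
/* To summarize the vectors built above: for the store expression with
   index I and block B,

       st_antloc[B] - bit I is set when the store could be executed at
                      the top of B (nothing kills it earlier in B),
       ae_gen[B]    - bit I is set when the store survives to the end of B,
       ae_kill[B]   - bit I is set when an aliased load or store, or a
                      modified address register, kills the expression in B,
       transp[B]    - bit I is set when nothing in B kills the expression.

   pre_edge_rev_lcm consumes these vectors on the reverse flowgraph and
   hands back pre_insert_map (edges that need a copy of the store) and
   pre_delete_map (blocks whose original store can become a register
   copy), which store_motion applies below.  */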
/* Insert an instruction at the beginning of a basic block, and update
   the BLOCK_HEAD if needed.  */

static void
insert_insn_start_bb (insn, bb)
     rtx insn;
     basic_block bb;
{
  /* Insert at start of successor block.  */
  rtx prev = PREV_INSN (bb->head);
  rtx before = bb->head;

  while (before != 0)
    {
      if (GET_CODE (before) != CODE_LABEL
          && (GET_CODE (before) != NOTE
              || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
        break;
      prev = before;
      if (prev == bb->end)
        break;
      before = NEXT_INSN (before);
    }

  insn = emit_insn_after (insn, prev);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
               bb->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }
}
/* This routine will insert a store on an edge.  EXPR is the ldst entry for
   the memory reference, and E is the edge to insert it on.  Returns non-zero
   if an edge insertion was performed.  */

static int
insert_store (expr, e)
     struct ls_expr * expr;
     edge e;
{
  rtx reg, insn;
  basic_block bb = e->dest;
  edge tmp;

  /* We did all the deletes before this insert, so if we didn't delete a
     store, then we haven't set the reaching reg yet either.  */
  if (expr->reaching_reg == NULL_RTX)
    return 0;

  reg = expr->reaching_reg;
  insn = gen_move_insn (expr->pattern, reg);

  /* If we are inserting this expression on ALL predecessor edges of a BB,
     insert it at the start of the BB, and reset the insert bits on the other
     edges so we don't try to insert it on the other edges.  */
  for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
    {
      int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
      if (index == EDGE_INDEX_NO_EDGE)
        abort ();
      if (! TEST_BIT (pre_insert_map[index], expr->index))
        break;
    }

  /* If tmp is NULL, we found an insertion on every edge, blank the
     insertion vector for these edges, and insert at the start of the BB.  */
  if (!tmp && bb != EXIT_BLOCK_PTR)
    {
      for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
        {
          int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
          RESET_BIT (pre_insert_map[index], expr->index);
        }
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  /* We can't insert on this edge, so we'll insert at the head of the
     successor's block.  See Morgan, sec 10.5.  */
  if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
    {
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  insert_insn_on_edge (insn, e);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
               e->src->index, e->dest->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  return 1;
}
/* This routine will replace a store with a SET to a specified register.  */

static void
replace_store_insn (reg, del, bb)
     rtx reg, del;
     basic_block bb;
{
  rtx insn;

  insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
  insn = emit_insn_after (insn, del);

  if (gcse_file)
    {
      fprintf (gcse_file,
               "STORE_MOTION delete insn in BB %d:\n ", bb->index);
      print_inline_rtx (gcse_file, del, 6);
      fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  delete_insn (del);
}
/* Delete a store, but copy the value that would have been stored into
   the reaching_reg for later storing.  */

static void
delete_store (expr, bb)
     struct ls_expr * expr;
     basic_block bb;
{
  rtx reg, i, del;

  if (expr->reaching_reg == NULL_RTX)
    expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));

  /* If there is more than 1 store, the earlier ones will be dead,
     but it doesn't hurt to replace them here.  */
  reg = expr->reaching_reg;

  for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
    {
      del = XEXP (i, 0);
      if (BLOCK_FOR_INSN (del) == bb)
        {
          /* We know there is only one since we deleted redundant
             ones during the available computation.  */
          replace_store_insn (reg, del, bb);
          break;
        }
    }
}
/* Free memory used by store motion.  */

static void
free_store_memory ()
{
  free_ldst_mems ();

  if (ae_gen)
    sbitmap_vector_free (ae_gen);
  if (ae_kill)
    sbitmap_vector_free (ae_kill);
  if (transp)
    sbitmap_vector_free (transp);
  if (st_antloc)
    sbitmap_vector_free (st_antloc);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);
  if (reg_set_in_block)
    sbitmap_vector_free (reg_set_in_block);

  ae_gen = ae_kill = transp = st_antloc = NULL;
  pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
}
/* Perform store motion.  Much like gcse, except we move expressions the
   other way by looking at the flowgraph in reverse.  */

static void
store_motion ()
{
  int x;
  struct ls_expr * ptr;
  int update_flow = 0;

  if (gcse_file)
    {
      fprintf (gcse_file, "before store motion\n");
      print_rtl (gcse_file, get_insns ());
    }

  init_alias_analysis ();

  /* Find all the stores that are live to the end of their block.  */
  num_stores = compute_store_table ();
  if (num_stores == 0)
    {
      sbitmap_vector_free (reg_set_in_block);
      end_alias_analysis ();
      return;
    }

  /* Now compute what's actually available to move.  */
  add_noreturn_fake_exit_edges ();
  build_store_vectors ();

  edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
                                st_antloc, ae_kill, &pre_insert_map,
                                &pre_delete_map);

  /* Now we want to insert the new stores which are going to be needed.  */
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      for (x = 0; x < n_basic_blocks; x++)
        if (TEST_BIT (pre_delete_map[x], ptr->index))
          delete_store (ptr, BASIC_BLOCK (x));

      for (x = 0; x < NUM_EDGES (edge_list); x++)
        if (TEST_BIT (pre_insert_map[x], ptr->index))
          update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
    }

  if (update_flow)
    commit_edge_insertions ();

  free_store_memory ();
  free_edge_list (edge_list);
  remove_fake_edges ();
  end_alias_analysis ();
}
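
/* In outline, one store motion pass therefore runs compute_store_table
   to collect the candidate stores, build_store_vectors to set up the
   dataflow bitmaps, pre_edge_rev_lcm to solve lazy code motion on the
   reverse flowgraph, and then delete_store and insert_store to apply
   pre_delete_map and pre_insert_map, committing any queued edge
   insertions before the bitmaps, edge list and fake edges are freed.  */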