/* Induction variable optimizations.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass tries to find the optimal set of induction variables for the loop.
   It optimizes just the basic linear induction variables (although adding
   support for other types should not be too hard).  It includes the
   optimizations commonly known as strength reduction, induction variable
   coalescing and induction variable elimination.  It does it in the
   following steps:

   1) The interesting uses of induction variables are found.  This includes

      -- uses of induction variables in non-linear expressions
      -- addresses of arrays
      -- comparisons of induction variables

   2) Candidates for the induction variables are found.  This includes

      -- old induction variables
      -- the variables defined by expressions derived from the "interesting
         uses" above

   3) The optimal (w.r.t. a cost function) set of variables is chosen.  The
      cost function assigns a cost to sets of induction variables and consists
      of three parts:

      -- The use costs.  Each of the interesting uses chooses the best induction
         variable in the set and adds its cost to the sum.  The cost reflects
         the time spent on modifying the induction variable's value to be usable
         for the given purpose (adding base and offset for arrays, etc.).
      -- The variable costs.  Each of the variables has a cost assigned that
         reflects the costs associated with incrementing the value of the
         variable.  The original variables are somewhat preferred.
      -- The set cost.  Depending on the size of the set, extra cost may be
         added to reflect register pressure.

      All the costs are defined in a machine-specific way, using the target
      hooks and machine descriptions to determine them.

   4) The trees are transformed to use the new variables, the dead code is
      removed.

   All of this is done loop by loop.  Doing it globally is theoretically
   possible, it might give a better performance and it might enable us
   to decide costs more precisely, but getting all the interactions right
   would be complicated.  */
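
/* For illustration only (this example is not part of the pass itself): for

     for (i = 0; i < n; i++)
       a[i] = 0;

   the address &a[i] is an interesting use.  Assuming 4-byte elements, the
   pass may select a single pointer candidate with base &a[0] and step 4,
   rewriting the loop roughly as

     for (p = &a[0]; p != &a[n]; p++)
       *p = 0;

   which strength-reduces the multiplication hidden in a[i] and, if i has
   no other uses, eliminates the original counter entirely.  */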
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "pointer-set.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "langhooks.h"
#include "tree-affine.h"
#include "tree-inline.h"
#include "tree-ssa-propagate.h"
/* FIXME: add_cost and zero_cost defined in expmed.h conflict with local uses.
 */
#include "expmed.h"
#undef add_cost
#undef zero_cost

/* FIXME: Expressions are expanded to RTL in this pass to determine the
   cost of different addressing modes.  This should be moved to a TBD
   interface between the GIMPLE and RTL worlds.  */
#include "expr.h"
/* The infinite cost.  */
#define INFTY 10000000

#define AVG_LOOP_NITER(LOOP) 5
/* Returns the expected number of loop iterations for LOOP.
   The average trip count is computed from profile data if it
   is available.  */

static inline HOST_WIDE_INT
avg_loop_niter (struct loop *loop)
{
  HOST_WIDE_INT niter = estimated_loop_iterations_int (loop, false);
  if (niter == -1)
    return AVG_LOOP_NITER (loop);

  return niter;
}
/* Representation of the induction variable.  */
struct iv
{
  tree base;              /* Initial value of the iv.  */
  tree base_object;       /* A memory object to which the induction variable points.  */
  tree step;              /* Step of the iv (constant only).  */
  tree ssa_name;          /* The ssa name with the value.  */
  bool biv_p;             /* Is it a biv?  */
  bool have_use_for;      /* Do we already have a use for it?  */
  unsigned use_id;        /* The identifier in the use if it is the case.  */
};
/* Per-ssa version information (induction variable descriptions, etc.).  */
struct version_info
{
  tree name;              /* The ssa name.  */
  struct iv *iv;          /* Induction variable description.  */
  bool has_nonlin_use;    /* For a loop-level invariant, whether it is used in
                             an expression that is not an induction variable.  */
  bool preserve_biv;      /* For the original biv, whether to preserve it.  */
  unsigned inv_id;        /* Id of an invariant.  */
};
/* Types of uses.  */
enum use_type
{
  USE_NONLINEAR_EXPR,     /* Use in a nonlinear expression.  */
  USE_ADDRESS,            /* Use in an address.  */
  USE_COMPARE             /* Use is a compare.  */
};
/* Cost of a computation.  */
typedef struct
{
  int cost;               /* The runtime cost.  */
  unsigned complexity;    /* The estimate of the complexity of the code for
                             the computation (in no concrete units --
                             complexity field should be larger for more
                             complex expressions and addressing modes).  */
} comp_cost;

static const comp_cost zero_cost = {0, 0};
static const comp_cost infinite_cost = {INFTY, INFTY};
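
/* For example (illustration only): {4, 0} and {4, 1} have equal runtime
   cost; compare_costs at the end of this file breaks the tie in favor of
   the lower complexity, so the computation with the simpler addressing
   mode is preferred.  */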
/* The candidate - cost pair.  */
struct cost_pair
{
  struct iv_cand *cand;   /* The candidate.  */
  comp_cost cost;         /* The cost.  */
  bitmap depends_on;      /* The list of invariants that have to be
                             preserved.  */
  tree value;             /* For final value elimination, the expression for
                             the final value of the iv.  For iv elimination,
                             the new bound to compare with.  */
  int inv_expr_id;        /* Loop invariant expression id.  */
};
/* Use.  */
struct iv_use
{
  unsigned id;            /* The id of the use.  */
  enum use_type type;     /* Type of the use.  */
  struct iv *iv;          /* The induction variable it is based on.  */
  gimple stmt;            /* Statement in which it occurs.  */
  tree *op_p;             /* The place where it occurs.  */
  bitmap related_cands;   /* The set of "related" iv candidates, plus the common
                             important ones.  */

  unsigned n_map_members; /* Number of candidates in the cost_map list.  */
  struct cost_pair *cost_map;
                          /* The costs w.r.t. the iv candidates.  */

  struct iv_cand *selected;
                          /* The selected candidate.  */
};
/* The position where the iv is computed.  */
enum iv_position
{
  IP_NORMAL,              /* At the end, just before the exit condition.  */
  IP_END,                 /* At the end of the latch block.  */
  IP_BEFORE_USE,          /* Immediately before a specific use.  */
  IP_AFTER_USE,           /* Immediately after a specific use.  */
  IP_ORIGINAL             /* The original biv.  */
};
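
/* Illustration only -- for a typical loop the positions are roughly

     loop:
       ...
       use of the iv          <- IP_BEFORE_USE / IP_AFTER_USE bracket this
       ...
       i = i + 1;             <- IP_ORIGINAL keeps the existing increment
       if (i < n) goto loop;  <- IP_NORMAL increments just before this test
     latch:                   <- IP_END increments in the latch block

   The exact blocks are computed by ip_normal_pos and ip_end_pos.  */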
/* The induction variable candidate.  */
struct iv_cand
{
  unsigned id;            /* The number of the candidate.  */
  bool important;         /* Whether this is an "important" candidate, i.e. such
                             that it should be considered by all uses.  */
  ENUM_BITFIELD(iv_position) pos : 8;   /* Where it is computed.  */
  gimple incremented_at;  /* For original biv, the statement where it is
                             incremented.  */
  tree var_before;        /* The variable used for it before increment.  */
  tree var_after;         /* The variable used for it after increment.  */
  struct iv *iv;          /* The value of the candidate.  NULL for
                             "pseudocandidate" used to indicate the possibility
                             to replace the final value of an iv by direct
                             computation of the value.  */
  unsigned cost;          /* Cost of the candidate.  */
  unsigned cost_step;     /* Cost of the candidate's increment operation.  */
  struct iv_use *ainc_use;/* For IP_{BEFORE,AFTER}_USE candidates, the place
                             where it is incremented.  */
  bitmap depends_on;      /* The list of invariants that are used in step of the
                             biv.  */
};
/* Loop invariant expression hashtable entry.  */
struct iv_inv_expr_ent
{
  tree expr;
  int id;
  hashval_t hash;
};
/* The data used by the induction variable optimizations.  */

typedef struct iv_use *iv_use_p;
DEF_VEC_P(iv_use_p);
DEF_VEC_ALLOC_P(iv_use_p,heap);

typedef struct iv_cand *iv_cand_p;
DEF_VEC_P(iv_cand_p);
DEF_VEC_ALLOC_P(iv_cand_p,heap);
struct ivopts_data
{
  /* The currently optimized loop.  */
  struct loop *current_loop;

  /* Numbers of iterations for all exits of the current loop.  */
  struct pointer_map_t *niters;

  /* Number of registers used in it.  */
  unsigned regs_used;

  /* The size of version_info array allocated.  */
  unsigned version_info_size;

  /* The array of information for the ssa names.  */
  struct version_info *version_info;

  /* The hashtable of loop invariant expressions created
     by ivopt.  */
  htab_t inv_expr_tab;

  /* Loop invariant expression id.  */
  int inv_expr_id;

  /* The bitmap of indices in version_info whose value was changed.  */
  bitmap relevant;

  /* The uses of induction variables.  */
  VEC(iv_use_p,heap) *iv_uses;

  /* The candidates.  */
  VEC(iv_cand_p,heap) *iv_candidates;

  /* A bitmap of important candidates.  */
  bitmap important_candidates;

  /* The maximum invariant id.  */
  unsigned max_inv_id;

  /* Whether to consider just related and important candidates when replacing a
     use.  */
  bool consider_all_candidates;

  /* Are we optimizing for speed?  */
  bool speed;

  /* Whether the loop body includes any function calls.  */
  bool body_includes_call;
};
/* An assignment of iv candidates to uses.  */
struct iv_ca
{
  /* The number of uses covered by the assignment.  */
  unsigned upto;

  /* Number of uses that cannot be expressed by the candidates in the set.  */
  unsigned bad_uses;

  /* Candidate assigned to a use, together with the related costs.  */
  struct cost_pair **cand_for_use;

  /* Number of times each candidate is used.  */
  unsigned *n_cand_uses;

  /* The candidates used.  */
  bitmap cands;

  /* The number of candidates in the set.  */
  unsigned n_cands;

  /* Total number of registers needed.  */
  unsigned n_regs;

  /* Total cost of expressing uses.  */
  comp_cost cand_use_cost;

  /* Total cost of candidates.  */
  unsigned cand_cost;

  /* Number of times each invariant is used.  */
  unsigned *n_invariant_uses;

  /* The array holding the number of uses of each loop
     invariant expression created by ivopt.  */
  unsigned *used_inv_expr;

  /* The number of created loop invariants.  */
  unsigned num_used_inv_expr;

  /* Total cost of the assignment.  */
  comp_cost cost;
};
/* Difference of two iv candidate assignments.  */
struct iv_ca_delta
{
  /* Changed use.  */
  struct iv_use *use_changed;

  /* An old assignment (for rollback purposes).  */
  struct cost_pair *old_cp;

  /* A new assignment.  */
  struct cost_pair *new_cp;

  /* Next change in the list.  */
  struct iv_ca_delta *next_change;
};
/* Bound on number of candidates below which all candidates are considered.  */

#define CONSIDER_ALL_CANDIDATES_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))

/* If there are more iv occurrences, we just give up (it is quite unlikely that
   optimizing such a loop would help, and it would take ages).  */

#define MAX_CONSIDERED_USES \
  ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))

/* If there are at most this number of ivs in the set, try removing unnecessary
   ivs from the set always.  */

#define ALWAYS_PRUNE_CAND_SET_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
/* The list of trees for which the decl_rtl field must be reset is stored
   here.  */

static VEC(tree,heap) *decl_rtl_to_reset;

static comp_cost force_expr_to_var_cost (tree, bool);
/* Number of uses recorded in DATA.  */

static inline unsigned
n_iv_uses (struct ivopts_data *data)
{
  return VEC_length (iv_use_p, data->iv_uses);
}

/* Ith use recorded in DATA.  */

static inline struct iv_use *
iv_use (struct ivopts_data *data, unsigned i)
{
  return VEC_index (iv_use_p, data->iv_uses, i);
}

/* Number of candidates recorded in DATA.  */

static inline unsigned
n_iv_cands (struct ivopts_data *data)
{
  return VEC_length (iv_cand_p, data->iv_candidates);
}

/* Ith candidate recorded in DATA.  */

static inline struct iv_cand *
iv_cand (struct ivopts_data *data, unsigned i)
{
  return VEC_index (iv_cand_p, data->iv_candidates, i);
}
/* The single loop exit if it dominates the latch, NULL otherwise.  */

edge
single_dom_exit (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return NULL;

  if (!just_once_each_iteration_p (loop, exit->src))
    return NULL;

  return exit;
}
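
/* E.g. a loop whose only exit is the "while" test dominates its latch, so
   the exit above is returned; a loop with an additional "break" in the
   middle of the body has two exits and NULL is returned instead
   (illustration only).  */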
/* Dumps information about the induction variable IV to FILE.  */

extern void dump_iv (FILE *, struct iv *);
void
dump_iv (FILE *file, struct iv *iv)
{
  if (iv->ssa_name)
    {
      fprintf (file, "ssa name ");
      print_generic_expr (file, iv->ssa_name, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "  type ");
  print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
  fprintf (file, "\n");

  if (iv->step)
    {
      fprintf (file, "  base ");
      print_generic_expr (file, iv->base, TDF_SLIM);
      fprintf (file, "\n");

      fprintf (file, "  step ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "\n");
    }
  else
    {
      fprintf (file, "  invariant ");
      print_generic_expr (file, iv->base, TDF_SLIM);
      fprintf (file, "\n");
    }

  if (iv->base_object)
    {
      fprintf (file, "  base object ");
      print_generic_expr (file, iv->base_object, TDF_SLIM);
      fprintf (file, "\n");
    }

  if (iv->biv_p)
    fprintf (file, "  is a biv\n");
}
/* Dumps information about the USE to FILE.  */

extern void dump_use (FILE *, struct iv_use *);
void
dump_use (FILE *file, struct iv_use *use)
{
  fprintf (file, "use %d\n", use->id);

  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      fprintf (file, "  generic\n");
      break;

    case USE_ADDRESS:
      fprintf (file, "  address\n");
      break;

    case USE_COMPARE:
      fprintf (file, "  compare\n");
      break;

    default:
      gcc_unreachable ();
    }

  fprintf (file, "  in statement ");
  print_gimple_stmt (file, use->stmt, 0, 0);
  fprintf (file, "\n");

  fprintf (file, "  at position ");
  if (use->op_p)
    print_generic_expr (file, *use->op_p, TDF_SLIM);
  fprintf (file, "\n");

  dump_iv (file, use->iv);

  if (use->related_cands)
    {
      fprintf (file, "  related candidates ");
      dump_bitmap (file, use->related_cands);
    }
}
/* Dumps information about the uses to FILE.  */

extern void dump_uses (FILE *, struct ivopts_data *);
void
dump_uses (FILE *file, struct ivopts_data *data)
{
  unsigned i;
  struct iv_use *use;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      dump_use (file, use);
      fprintf (file, "\n");
    }
}
/* Dumps information about induction variable candidate CAND to FILE.  */

extern void dump_cand (FILE *, struct iv_cand *);
void
dump_cand (FILE *file, struct iv_cand *cand)
{
  struct iv *iv = cand->iv;

  fprintf (file, "candidate %d%s\n",
           cand->id, cand->important ? " (important)" : "");

  if (cand->depends_on)
    {
      fprintf (file, "  depends on ");
      dump_bitmap (file, cand->depends_on);
    }

  if (!iv)
    {
      fprintf (file, "  final value replacement\n");
      return;
    }

  if (cand->var_before)
    {
      fprintf (file, "  var_before ");
      print_generic_expr (file, cand->var_before, TDF_SLIM);
      fprintf (file, "\n");
    }
  if (cand->var_after)
    {
      fprintf (file, "  var_after ");
      print_generic_expr (file, cand->var_after, TDF_SLIM);
      fprintf (file, "\n");
    }

  switch (cand->pos)
    {
    case IP_NORMAL:
      fprintf (file, "  incremented before exit test\n");
      break;

    case IP_BEFORE_USE:
      fprintf (file, "  incremented before use %d\n", cand->ainc_use->id);
      break;

    case IP_AFTER_USE:
      fprintf (file, "  incremented after use %d\n", cand->ainc_use->id);
      break;

    case IP_END:
      fprintf (file, "  incremented at end\n");
      break;

    case IP_ORIGINAL:
      fprintf (file, "  original biv\n");
      break;
    }

  dump_iv (file, iv);
}
/* Returns the info for ssa version VER.  */

static inline struct version_info *
ver_info (struct ivopts_data *data, unsigned ver)
{
  return data->version_info + ver;
}

/* Returns the info for ssa name NAME.  */

static inline struct version_info *
name_info (struct ivopts_data *data, tree name)
{
  return ver_info (data, SSA_NAME_VERSION (name));
}
/* Returns true if STMT is after the place where the IP_NORMAL ivs will be
   emitted in LOOP.  */

static bool
stmt_after_ip_normal_pos (struct loop *loop, gimple stmt)
{
  basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);

  gcc_assert (bb);

  if (sbb == loop->latch)
    return true;

  if (sbb != bb)
    return false;

  return stmt == last_stmt (bb);
}
/* Returns true if STMT is after the place where the original induction
   variable CAND is incremented.  If TRUE_IF_EQUAL is set, we return true
   if the positions are identical.  */

static bool
stmt_after_inc_pos (struct iv_cand *cand, gimple stmt, bool true_if_equal)
{
  basic_block cand_bb = gimple_bb (cand->incremented_at);
  basic_block stmt_bb = gimple_bb (stmt);

  if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
    return false;

  if (stmt_bb != cand_bb)
    return true;

  if (true_if_equal
      && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
    return true;
  return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
}
/* Returns true if STMT is after the place where the induction variable
   CAND is incremented in LOOP.  */

static bool
stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple stmt)
{
  switch (cand->pos)
    {
    case IP_END:
      return false;

    case IP_NORMAL:
      return stmt_after_ip_normal_pos (loop, stmt);

    case IP_ORIGINAL:
    case IP_AFTER_USE:
      return stmt_after_inc_pos (cand, stmt, false);

    case IP_BEFORE_USE:
      return stmt_after_inc_pos (cand, stmt, true);

    default:
      gcc_unreachable ();
    }
}
/* Returns true if EXP is an ssa name that occurs in an abnormal phi node.  */

static bool
abnormal_ssa_name_p (tree exp)
{
  if (!exp)
    return false;

  if (TREE_CODE (exp) != SSA_NAME)
    return false;

  return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) != 0;
}
/* Returns false if BASE or INDEX contains an ssa name that occurs in an
   abnormal phi node.  Callback for for_each_index.  */

static bool
idx_contains_abnormal_ssa_name_p (tree base, tree *index,
                                  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 2)))
        return false;
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 3)))
        return false;
    }

  return !abnormal_ssa_name_p (*index);
}
/* Returns true if EXPR contains an ssa name that occurs in an
   abnormal phi node.  */

bool
contains_abnormal_ssa_name_p (tree expr)
{
  enum tree_code code;
  enum tree_code_class codeclass;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  codeclass = TREE_CODE_CLASS (code);

  if (code == SSA_NAME)
    return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr) != 0;

  if (code == INTEGER_CST
      || is_gimple_min_invariant (expr))
    return false;

  if (code == ADDR_EXPR)
    return !for_each_index (&TREE_OPERAND (expr, 0),
                            idx_contains_abnormal_ssa_name_p,
                            NULL);

  if (code == COND_EXPR)
    return contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0))
      || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1))
      || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 2));

  switch (codeclass)
    {
    case tcc_binary:
    case tcc_comparison:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1)))
        return true;

      /* Fallthru.  */
    case tcc_unary:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0)))
        return true;

      break;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Returns tree describing number of iterations determined from
   EXIT of DATA->current_loop, or NULL if something goes wrong.  */

static tree
niter_for_exit (struct ivopts_data *data, edge exit,
                struct tree_niter_desc **desc_p)
{
  struct tree_niter_desc *desc = NULL;
  tree niter;
  void **slot;

  if (!data->niters)
    {
      data->niters = pointer_map_create ();
      slot = NULL;
    }
  else
    slot = pointer_map_contains (data->niters, exit);

  if (!slot)
    {
      /* Try to determine number of iterations.  We must know it
         unconditionally (i.e., without possibility of # of iterations
         being zero).  Also, we cannot safely work with ssa names that
         appear in phi nodes on abnormal edges, so that we do not create
         overlapping life ranges for them (PR 27283).  */
      desc = XNEW (struct tree_niter_desc);
      if (number_of_iterations_exit (data->current_loop,
                                     exit, desc, true)
          && integer_zerop (desc->may_be_zero)
          && !contains_abnormal_ssa_name_p (desc->niter))
        niter = desc->niter;
      else
        niter = desc->niter = NULL_TREE;

      slot = pointer_map_insert (data->niters, exit);
      *slot = desc;
    }
  else
    niter = ((struct tree_niter_desc *) *slot)->niter;

  if (desc_p)
    *desc_p = (struct tree_niter_desc *) *slot;
  return niter;
}
/* Returns tree describing number of iterations determined from
   single dominating exit of DATA->current_loop, or NULL if something
   goes wrong.  */

static tree
niter_for_single_dom_exit (struct ivopts_data *data)
{
  edge exit = single_dom_exit (data->current_loop);

  if (!exit)
    return NULL;

  return niter_for_exit (data, exit, NULL);
}
/* Hash table equality function for expressions.  */

static int
htab_inv_expr_eq (const void *ent1, const void *ent2)
{
  const struct iv_inv_expr_ent *expr1 =
      (const struct iv_inv_expr_ent *)ent1;
  const struct iv_inv_expr_ent *expr2 =
      (const struct iv_inv_expr_ent *)ent2;

  return expr1->hash == expr2->hash
         && operand_equal_p (expr1->expr, expr2->expr, 0);
}
/* Hash function for loop invariant expressions.  */

static hashval_t
htab_inv_expr_hash (const void *ent)
{
  const struct iv_inv_expr_ent *expr =
      (const struct iv_inv_expr_ent *)ent;
  return expr->hash;
}
/* Initializes data structures used by the iv optimization pass, stored
   in DATA.  */

static void
tree_ssa_iv_optimize_init (struct ivopts_data *data)
{
  data->version_info_size = 2 * num_ssa_names;
  data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
  data->relevant = BITMAP_ALLOC (NULL);
  data->important_candidates = BITMAP_ALLOC (NULL);
  data->max_inv_id = 0;
  data->niters = NULL;
  data->iv_uses = VEC_alloc (iv_use_p, heap, 20);
  data->iv_candidates = VEC_alloc (iv_cand_p, heap, 20);
  data->inv_expr_tab = htab_create (10, htab_inv_expr_hash,
                                    htab_inv_expr_eq, free);
  data->inv_expr_id = 0;
  decl_rtl_to_reset = VEC_alloc (tree, heap, 20);
}
/* Returns a memory object to which EXPR points.  In case we are able to
   determine that it does not point to any such object, NULL is returned.  */

static tree
determine_base_object (tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  tree base, obj;

  /* If this is a pointer casted to any type, we need to determine
     the base object for the pointer; so handle conversions before
     throwing away non-pointer expressions.  */
  if (CONVERT_EXPR_P (expr))
    return determine_base_object (TREE_OPERAND (expr, 0));

  if (!POINTER_TYPE_P (TREE_TYPE (expr)))
    return NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      return NULL_TREE;

    case ADDR_EXPR:
      obj = TREE_OPERAND (expr, 0);
      base = get_base_address (obj);

      if (!base)
        return expr;

      if (TREE_CODE (base) == MEM_REF)
        return determine_base_object (TREE_OPERAND (base, 0));

      return fold_convert (ptr_type_node,
                           build_fold_addr_expr (base));

    case POINTER_PLUS_EXPR:
      return determine_base_object (TREE_OPERAND (expr, 0));

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Pointer addition is done solely using POINTER_PLUS_EXPR.  */
      gcc_unreachable ();

    default:
      return fold_convert (ptr_type_node, expr);
    }
}
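
/* For instance (illustration only): for EXPR = &a[i] + 16 the recursion
   through POINTER_PLUS_EXPR and ADDR_EXPR yields the pointer to the array
   object a itself, while for a plain INTEGER_CST the function returns
   NULL_TREE, i.e. "points to no object".  */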
/* Allocates an induction variable with given initial value BASE and step STEP
   for loop LOOP.  */

static struct iv *
alloc_iv (tree base, tree step)
{
  struct iv *iv = XCNEW (struct iv);
  gcc_assert (step != NULL_TREE);

  iv->base = base;
  iv->base_object = determine_base_object (base);
  iv->step = step;
  iv->biv_p = false;
  iv->have_use_for = false;
  iv->use_id = 0;
  iv->ssa_name = NULL_TREE;

  return iv;
}
/* Sets STEP and BASE for induction variable IV.  */

static void
set_iv (struct ivopts_data *data, tree iv, tree base, tree step)
{
  struct version_info *info = name_info (data, iv);

  gcc_assert (!info->iv);

  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
  info->iv = alloc_iv (base, step);
  info->iv->ssa_name = iv;
}
/* Finds induction variable declaration for VAR.  */

static struct iv *
get_iv (struct ivopts_data *data, tree var)
{
  basic_block bb;
  tree type = TREE_TYPE (var);

  if (!POINTER_TYPE_P (type)
      && !INTEGRAL_TYPE_P (type))
    return NULL;

  if (!name_info (data, var)->iv)
    {
      bb = gimple_bb (SSA_NAME_DEF_STMT (var));

      if (!bb
          || !flow_bb_inside_loop_p (data->current_loop, bb))
        set_iv (data, var, var, build_int_cst (type, 0));
    }

  return name_info (data, var)->iv;
}
/* Determines the step of a biv defined in PHI.  Returns NULL if PHI does
   not define a simple affine biv with nonzero step.  */

static tree
determine_biv_step (gimple phi)
{
  struct loop *loop = gimple_bb (phi)->loop_father;
  tree name = PHI_RESULT (phi);
  affine_iv iv;

  if (!is_gimple_reg (name))
    return NULL_TREE;

  if (!simple_iv (loop, loop, name, &iv, true))
    return NULL_TREE;

  return integer_zerop (iv.step) ? NULL_TREE : iv.step;
}
/* Finds basic ivs.  */

static bool
find_bivs (struct ivopts_data *data)
{
  gimple phi;
  tree step, type, base;
  bool found = false;
  struct loop *loop = data->current_loop;
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);

      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
        continue;

      step = determine_biv_step (phi);
      if (!step)
        continue;

      base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
      base = expand_simple_operations (base);
      if (contains_abnormal_ssa_name_p (base)
          || contains_abnormal_ssa_name_p (step))
        continue;

      type = TREE_TYPE (PHI_RESULT (phi));
      base = fold_convert (type, base);
      if (step)
        {
          if (POINTER_TYPE_P (type))
            step = fold_convert (sizetype, step);
          else
            step = fold_convert (type, step);
        }

      set_iv (data, PHI_RESULT (phi), base, step);
      found = true;
    }

  return found;
}
/* Marks basic ivs.  */

static void
mark_bivs (struct ivopts_data *data)
{
  gimple phi;
  tree var;
  struct iv *iv, *incr_iv;
  struct loop *loop = data->current_loop;
  basic_block incr_bb;
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);

      iv = get_iv (data, PHI_RESULT (phi));
      if (!iv)
        continue;

      var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
      incr_iv = get_iv (data, var);
      if (!incr_iv)
        continue;

      /* If the increment is in the subloop, ignore it.  */
      incr_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
      if (incr_bb->loop_father != data->current_loop
          || (incr_bb->flags & BB_IRREDUCIBLE_LOOP))
        continue;

      iv->biv_p = true;
      incr_iv->biv_p = true;
    }
}
/* Checks whether STMT defines a linear induction variable and stores its
   parameters to IV.  */

static bool
find_givs_in_stmt_scev (struct ivopts_data *data, gimple stmt, affine_iv *iv)
{
  tree lhs;
  struct loop *loop = data->current_loop;

  iv->base = NULL_TREE;
  iv->step = NULL_TREE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
    return false;
  iv->base = expand_simple_operations (iv->base);

  if (contains_abnormal_ssa_name_p (iv->base)
      || contains_abnormal_ssa_name_p (iv->step))
    return false;

  /* If STMT could throw, then do not consider STMT as defining a GIV.
     While this will suppress optimizations, we can not safely delete this
     GIV and associated statements, even if it appears it is not used.  */
  if (stmt_could_throw_p (stmt))
    return false;

  return true;
}
/* Finds general ivs in statement STMT.  */

static void
find_givs_in_stmt (struct ivopts_data *data, gimple stmt)
{
  affine_iv iv;

  if (!find_givs_in_stmt_scev (data, stmt, &iv))
    return;

  set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step);
}
/* Finds general ivs in basic block BB.  */

static void
find_givs_in_bb (struct ivopts_data *data, basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    find_givs_in_stmt (data, gsi_stmt (bsi));
}
/* Finds general ivs.  */

static void
find_givs (struct ivopts_data *data)
{
  struct loop *loop = data->current_loop;
  basic_block *body = get_loop_body_in_dom_order (loop);
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    find_givs_in_bb (data, body[i]);
  free (body);
}
/* For each ssa name defined in LOOP determines whether it is an induction
   variable and if so, its initial value and step.  */

static bool
find_induction_variables (struct ivopts_data *data)
{
  unsigned i;
  bitmap_iterator bi;

  if (!find_bivs (data))
    return false;

  find_givs (data);
  mark_bivs (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree niter = niter_for_single_dom_exit (data);

      if (niter)
        {
          fprintf (dump_file, "  number of iterations ");
          print_generic_expr (dump_file, niter, TDF_SLIM);
          fprintf (dump_file, "\n\n");
        }

      fprintf (dump_file, "Induction variables:\n\n");

      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
        {
          if (ver_info (data, i)->iv)
            dump_iv (dump_file, ver_info (data, i)->iv);
        }
    }

  return true;
}
/* Records a use of type USE_TYPE at *USE_P in STMT whose value is IV.  */

static struct iv_use *
record_use (struct ivopts_data *data, tree *use_p, struct iv *iv,
            gimple stmt, enum use_type use_type)
{
  struct iv_use *use = XCNEW (struct iv_use);

  use->id = n_iv_uses (data);
  use->type = use_type;
  use->iv = iv;
  use->stmt = stmt;
  use->op_p = use_p;
  use->related_cands = BITMAP_ALLOC (NULL);

  /* To avoid showing ssa name in the dumps, if it was not reset by the
     caller.  */
  iv->ssa_name = NULL_TREE;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_use (dump_file, use);

  VEC_safe_push (iv_use_p, heap, data->iv_uses, use);

  return use;
}
/* Checks whether OP is a loop-level invariant and if so, records it.
   NONLINEAR_USE is true if the invariant is used in a way we do not
   handle specially.  */

static void
record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
{
  basic_block bb;
  struct version_info *info;

  if (TREE_CODE (op) != SSA_NAME
      || !is_gimple_reg (op))
    return;

  bb = gimple_bb (SSA_NAME_DEF_STMT (op));
  if (bb
      && flow_bb_inside_loop_p (data->current_loop, bb))
    return;

  info = name_info (data, op);
  info->name = op;
  info->has_nonlin_use |= nonlinear_use;
  if (!info->inv_id)
    info->inv_id = ++data->max_inv_id;
  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
}
/* Checks whether the use OP is interesting and if so, records it.  */

static struct iv_use *
find_interesting_uses_op (struct ivopts_data *data, tree op)
{
  struct iv *iv;
  struct iv *civ;
  gimple stmt;
  struct iv_use *use;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  iv = get_iv (data, op);
  if (!iv)
    return NULL;

  if (iv->have_use_for)
    {
      use = iv_use (data, iv->use_id);

      gcc_assert (use->type == USE_NONLINEAR_EXPR);
      return use;
    }

  if (integer_zerop (iv->step))
    {
      record_invariant (data, op, true);
      return NULL;
    }
  iv->have_use_for = true;

  civ = XNEW (struct iv);
  *civ = *iv;

  stmt = SSA_NAME_DEF_STMT (op);
  gcc_assert (gimple_code (stmt) == GIMPLE_PHI
              || is_gimple_assign (stmt));

  use = record_use (data, NULL, civ, stmt, USE_NONLINEAR_EXPR);
  iv->use_id = use->id;

  return use;
}
/* Given a condition in statement STMT, checks whether it is a compare
   of an induction variable and an invariant.  If this is the case,
   CONTROL_VAR is set to location of the iv, BOUND to the location of
   the invariant, IV_VAR and IV_BOUND are set to the corresponding
   induction variable descriptions, and true is returned.  If this is not
   the case, CONTROL_VAR and BOUND are set to the arguments of the
   condition and false is returned.  */

static bool
extract_cond_operands (struct ivopts_data *data, gimple stmt,
                       tree **control_var, tree **bound,
                       struct iv **iv_var, struct iv **iv_bound)
{
  /* The objects returned when COND has constant operands.  */
  static struct iv const_iv;
  static tree zero;
  tree *op0 = &zero, *op1 = &zero, *tmp_op;
  struct iv *iv0 = &const_iv, *iv1 = &const_iv, *tmp_iv;
  bool ret = false;

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      op0 = gimple_cond_lhs_ptr (stmt);
      op1 = gimple_cond_rhs_ptr (stmt);
    }
  else
    {
      op0 = gimple_assign_rhs1_ptr (stmt);
      op1 = gimple_assign_rhs2_ptr (stmt);
    }

  zero = integer_zero_node;
  const_iv.step = integer_zero_node;

  if (TREE_CODE (*op0) == SSA_NAME)
    iv0 = get_iv (data, *op0);
  if (TREE_CODE (*op1) == SSA_NAME)
    iv1 = get_iv (data, *op1);

  /* Exactly one of the compared values must be an iv, and the other one must
     be an invariant.  */
  if (!iv0 || !iv1)
    goto end;

  if (integer_zerop (iv0->step))
    {
      /* Control variable may be on the other side.  */
      tmp_op = op0; op0 = op1; op1 = tmp_op;
      tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv;
    }
  ret = !integer_zerop (iv0->step) && integer_zerop (iv1->step);

end:
  if (control_var)
    *control_var = op0;
  if (bound)
    *bound = op1;
  if (iv_var)
    *iv_var = iv0;
  if (iv_bound)
    *iv_bound = iv1;

  return ret;
}
/* Checks whether the condition in STMT is interesting and if so,
   records it.  */

static void
find_interesting_uses_cond (struct ivopts_data *data, gimple stmt)
{
  tree *var_p, *bound_p;
  struct iv *var_iv, *civ;

  if (!extract_cond_operands (data, stmt, &var_p, &bound_p, &var_iv, NULL))
    {
      find_interesting_uses_op (data, *var_p);
      find_interesting_uses_op (data, *bound_p);
      return;
    }

  civ = XNEW (struct iv);
  *civ = *var_iv;
  record_use (data, NULL, civ, stmt, USE_COMPARE);
}
/* Returns true if expression EXPR is obviously invariant in LOOP,
   i.e. if all its operands are defined outside of the LOOP.  LOOP
   should not be the function body.  */

bool
expr_invariant_in_loop_p (struct loop *loop, tree expr)
{
  basic_block def_bb;
  unsigned i, len;

  gcc_assert (loop_depth (loop) > 0);

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
          && flow_bb_inside_loop_p (loop, def_bb))
        return false;

      return true;
    }

  if (!EXPR_P (expr))
    return false;

  len = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < len; i++)
    if (!expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
      return false;

  return true;
}
/* Returns true if statement STMT is obviously invariant in LOOP,
   i.e. if all its operands on the RHS are defined outside of the LOOP.
   LOOP should not be the function body.  */

bool
stmt_invariant_in_loop_p (struct loop *loop, gimple stmt)
{
  unsigned i;
  tree lhs = NULL_TREE;

  gcc_assert (loop_depth (loop) > 0);

  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); i++)
    {
      tree op = gimple_op (stmt, i);
      if (op != lhs && !expr_invariant_in_loop_p (loop, op))
        return false;
    }

  return true;
}
/* Cumulates the steps of indices into DATA and replaces their values with the
   initial ones.  Returns false when the value of the index cannot be determined.
   Callback for for_each_index.  */

struct ifs_ivopts_data
{
  struct ivopts_data *ivopts_data;
  gimple stmt;
  tree step;
};

static bool
idx_find_step (tree base, tree *idx, void *data)
{
  struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
  struct iv *iv;
  tree step, iv_base, iv_step, lbound, off;
  struct loop *loop = dta->ivopts_data->current_loop;

  /* If base is a component ref, require that the offset of the reference
     be invariant.  */
  if (TREE_CODE (base) == COMPONENT_REF)
    {
      off = component_ref_field_offset (base);
      return expr_invariant_in_loop_p (loop, off);
    }

  /* If base is array, first check whether we will be able to move the
     reference out of the loop (in order to take its address in strength
     reduction).  In order for this to work we need both lower bound
     and step to be loop invariants.  */
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      /* Moreover, for a range, the size needs to be invariant as well.  */
      if (TREE_CODE (base) == ARRAY_RANGE_REF
          && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
        return false;

      step = array_ref_element_size (base);
      lbound = array_ref_low_bound (base);

      if (!expr_invariant_in_loop_p (loop, step)
          || !expr_invariant_in_loop_p (loop, lbound))
        return false;
    }

  if (TREE_CODE (*idx) != SSA_NAME)
    return true;

  iv = get_iv (dta->ivopts_data, *idx);
  if (!iv)
    return false;

  /* XXX  We produce for a base of *D42 with iv->base being &x[0]
          *&x[0], which is not folded and does not trigger the
          ARRAY_REF path below.  */
  *idx = iv->base;

  if (integer_zerop (iv->step))
    return true;

  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      step = array_ref_element_size (base);

      /* We only handle addresses whose step is an integer constant.  */
      if (TREE_CODE (step) != INTEGER_CST)
        return false;
    }
  else
    /* The step for pointer arithmetics already is 1 byte.  */
    step = size_one_node;

  iv_base = iv->base;
  iv_step = iv->step;
  if (!convert_affine_scev (dta->ivopts_data->current_loop,
                            sizetype, &iv_base, &iv_step, dta->stmt,
                            false))
    {
      /* The index might wrap.  */
      return false;
    }

  step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
  dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);

  return true;
}
/* Records use in index IDX.  Callback for for_each_index.  Ivopts data
   object is passed to it in DATA.  */

static bool
idx_record_use (tree base, tree *idx,
                void *vdata)
{
  struct ivopts_data *data = (struct ivopts_data *) vdata;
  find_interesting_uses_op (data, *idx);
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      find_interesting_uses_op (data, array_ref_element_size (base));
      find_interesting_uses_op (data, array_ref_low_bound (base));
    }
  return true;
}
/* If we can prove that TOP = cst * BOT for some constant cst,
   store cst to MUL and return true.  Otherwise return false.
   The returned value is always sign-extended, regardless of the
   signedness of TOP and BOT.  */

static bool
constant_multiple_of (tree top, tree bot, double_int *mul)
{
  tree mby;
  enum tree_code code;
  double_int res, p0, p1;
  unsigned precision = TYPE_PRECISION (TREE_TYPE (top));

  STRIP_NOPS (top);
  STRIP_NOPS (bot);

  if (operand_equal_p (top, bot, 0))
    {
      *mul = double_int_one;
      return true;
    }

  code = TREE_CODE (top);
  switch (code)
    {
    case MULT_EXPR:
      mby = TREE_OPERAND (top, 1);
      if (TREE_CODE (mby) != INTEGER_CST)
        return false;

      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
        return false;

      *mul = double_int_sext (double_int_mul (res, tree_to_double_int (mby)),
                              precision);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
          || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
        return false;

      if (code == MINUS_EXPR)
        p1 = double_int_neg (p1);
      *mul = double_int_sext (double_int_add (p0, p1), precision);
      return true;

    case INTEGER_CST:
      if (TREE_CODE (bot) != INTEGER_CST)
        return false;

      p0 = double_int_sext (tree_to_double_int (top), precision);
      p1 = double_int_sext (tree_to_double_int (bot), precision);
      if (double_int_zero_p (p1))
        return false;
      *mul = double_int_sext (double_int_sdivmod (p0, p1, FLOOR_DIV_EXPR, &res),
                              precision);
      return double_int_zero_p (res);

    default:
      return false;
    }
}
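
/* Examples (illustration only): for TOP = i * 8 + i * 4 and BOT = i the
   function stores 12 to *MUL and returns true; for TOP = 12 and BOT = 4
   it stores 3; for TOP = 13 and BOT = 4 the floor division leaves a
   remainder, so it returns false.  */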
/* Returns true if memory reference REF with step STEP may be unaligned.  */

static bool
may_be_unaligned_p (tree ref, tree step)
{
  tree base;
  tree base_type;
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  tree toffset;
  enum machine_mode mode;
  int unsignedp, volatilep;
  unsigned base_align;

  /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
     thus they are not misaligned.  */
  if (TREE_CODE (ref) == TARGET_MEM_REF)
    return false;

  /* The test below is basically copy of what expr.c:normal_inner_ref
     does to check whether the object must be loaded by parts when
     STRICT_ALIGNMENT is true.  */
  base = get_inner_reference (ref, &bitsize, &bitpos, &toffset, &mode,
                              &unsignedp, &volatilep, true);
  base_type = TREE_TYPE (base);
  base_align = TYPE_ALIGN (base_type);

  if (mode != BLKmode)
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (mode);

      if (base_align < mode_align
          || (bitpos % mode_align) != 0
          || (bitpos % BITS_PER_UNIT) != 0)
        return true;

      if (toffset
          && (highest_pow2_factor (toffset) * BITS_PER_UNIT) < mode_align)
        return true;

      if ((highest_pow2_factor (step) * BITS_PER_UNIT) < mode_align)
        return true;
    }

  return false;
}
/* Return true if EXPR may be non-addressable.  */

bool
may_be_nonaddressable_p (tree expr)
{
  switch (TREE_CODE (expr))
    {
    case TARGET_MEM_REF:
      /* TARGET_MEM_REFs are translated directly to valid MEMs on the
         target, thus they are always addressable.  */
      return false;

    case COMPONENT_REF:
      return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
             || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    case VIEW_CONVERT_EXPR:
      /* This kind of view-conversions may wrap non-addressable objects
         and make them look addressable.  After some processing the
         non-addressability may be uncovered again, causing ADDR_EXPRs
         of inappropriate objects to be built.  */
      if (is_gimple_reg (TREE_OPERAND (expr, 0))
          || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
        return true;

      /* ... fall through ... */

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    CASE_CONVERT:
      return true;

    default:
      break;
    }

  return false;
}
/* Finds addresses in *OP_P inside STMT.  */

static void
find_interesting_uses_address (struct ivopts_data *data, gimple stmt, tree *op_p)
{
  tree base = *op_p, step = size_zero_node;
  struct iv *civ;
  struct ifs_ivopts_data ifs_ivopts_data;

  /* Do not play with volatile memory references.  A bit too conservative,
     perhaps, but safe.  */
  if (gimple_has_volatile_ops (stmt))
    goto fail;

  /* Ignore bitfields for now.  Not really something terribly complicated
     to handle.  TODO.  */
  if (TREE_CODE (base) == BIT_FIELD_REF)
    goto fail;

  base = unshare_expr (base);

  if (TREE_CODE (base) == TARGET_MEM_REF)
    {
      tree type = build_pointer_type (TREE_TYPE (base));
      tree astep;

      if (TMR_BASE (base)
          && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
        {
          civ = get_iv (data, TMR_BASE (base));
          if (!civ)
            goto fail;

          TMR_BASE (base) = civ->base;
          step = civ->step;
        }
      if (TMR_INDEX2 (base)
          && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
        {
          civ = get_iv (data, TMR_INDEX2 (base));
          if (!civ)
            goto fail;

          TMR_INDEX2 (base) = civ->base;
          step = civ->step;
        }
      if (TMR_INDEX (base)
          && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
        {
          civ = get_iv (data, TMR_INDEX (base));
          if (!civ)
            goto fail;

          TMR_INDEX (base) = civ->base;
          astep = civ->step;

          if (astep)
            {
              if (TMR_STEP (base))
                astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);

              step = fold_build2 (PLUS_EXPR, type, step, astep);
            }
        }

      if (integer_zerop (step))
        goto fail;
      base = tree_mem_ref_addr (type, base);
    }
  else
    {
      ifs_ivopts_data.ivopts_data = data;
      ifs_ivopts_data.stmt = stmt;
      ifs_ivopts_data.step = size_zero_node;
      if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
          || integer_zerop (ifs_ivopts_data.step))
        goto fail;
      step = ifs_ivopts_data.step;

      /* Check that the base expression is addressable.  This needs
         to be done after substituting bases of IVs into it.  */
      if (may_be_nonaddressable_p (base))
        goto fail;

      /* Moreover, on strict alignment platforms, check that it is
         sufficiently aligned.  */
      if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
        goto fail;

      base = build_fold_addr_expr (base);

      /* Substituting bases of IVs into the base expression might
         have caused folding opportunities.  */
      if (TREE_CODE (base) == ADDR_EXPR)
        {
          tree *ref = &TREE_OPERAND (base, 0);
          while (handled_component_p (*ref))
            ref = &TREE_OPERAND (*ref, 0);
          if (TREE_CODE (*ref) == MEM_REF)
            {
              tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
                                      TREE_OPERAND (*ref, 0),
                                      TREE_OPERAND (*ref, 1));
              if (tem)
                *ref = tem;
            }
        }
    }

  civ = alloc_iv (base, step);
  record_use (data, op_p, civ, stmt, USE_ADDRESS);
  return;

fail:
  for_each_index (op_p, idx_record_use, data);
}
/* Finds and records invariants used in STMT.  */

static void
find_invariants_stmt (struct ivopts_data *data, gimple stmt)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  tree op;

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);
      record_invariant (data, op, false);
    }
}
/* Finds interesting uses of induction variables in the statement STMT.  */

static void
find_interesting_uses_stmt (struct ivopts_data *data, gimple stmt)
{
  struct iv *iv;
  tree op, *lhs, *rhs;
  ssa_op_iter iter;
  use_operand_p use_p;
  enum tree_code code;

  find_invariants_stmt (data, stmt);

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      find_interesting_uses_cond (data, stmt);
      return;
    }

  if (is_gimple_assign (stmt))
    {
      lhs = gimple_assign_lhs_ptr (stmt);
      rhs = gimple_assign_rhs1_ptr (stmt);

      if (TREE_CODE (*lhs) == SSA_NAME)
        {
          /* If the statement defines an induction variable, the uses are not
             interesting by themselves.  */

          iv = get_iv (data, *lhs);

          if (iv && !integer_zerop (iv->step))
            return;
        }

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
          && (REFERENCE_CLASS_P (*rhs)
              || is_gimple_val (*rhs)))
        {
          if (REFERENCE_CLASS_P (*rhs))
            find_interesting_uses_address (data, stmt, rhs);
          else
            find_interesting_uses_op (data, *rhs);

          if (REFERENCE_CLASS_P (*lhs))
            find_interesting_uses_address (data, stmt, lhs);
          return;
        }
      else if (TREE_CODE_CLASS (code) == tcc_comparison)
        {
          find_interesting_uses_cond (data, stmt);
          return;
        }

      /* TODO -- we should also handle address uses of type

         memory = call (whatever);

         and

         call (memory).  */
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_bb (stmt) == data->current_loop->header)
    {
      iv = get_iv (data, PHI_RESULT (stmt));

      if (iv && !integer_zerop (iv->step))
        return;
    }

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);

      if (TREE_CODE (op) != SSA_NAME)
        continue;

      iv = get_iv (data, op);
      if (!iv)
        continue;

      find_interesting_uses_op (data, op);
    }
}
/* Finds interesting uses of induction variables outside of loops
   on loop exit edge EXIT.  */

static void
find_interesting_uses_outside (struct ivopts_data *data, edge exit)
{
  gimple phi;
  gimple_stmt_iterator psi;
  tree def;

  for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
      if (is_gimple_reg (def))
        find_interesting_uses_op (data, def);
    }
}
/* Finds uses of the induction variables that are interesting.  */

static void
find_interesting_uses (struct ivopts_data *data)
{
  basic_block bb;
  gimple_stmt_iterator bsi;
  basic_block *body = get_loop_body (data->current_loop);
  unsigned i;
  struct version_info *info;
  edge e;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Uses:\n\n");

  for (i = 0; i < data->current_loop->num_nodes; i++)
    {
      edge_iterator ei;
      bb = body[i];

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR
            && !flow_bb_inside_loop_p (data->current_loop, e->dest))
          find_interesting_uses_outside (data, e);

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        find_interesting_uses_stmt (data, gsi_stmt (bsi));
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        if (!is_gimple_debug (gsi_stmt (bsi)))
          find_interesting_uses_stmt (data, gsi_stmt (bsi));
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bitmap_iterator bi;

      fprintf (dump_file, "\n");

      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
        {
          info = ver_info (data, i);
          if (info->inv_id)
            {
              fprintf (dump_file, "  ");
              print_generic_expr (dump_file, info->name, TDF_SLIM);
              fprintf (dump_file, " is invariant (%d)%s\n",
                       info->inv_id, info->has_nonlin_use ? "" : ", eliminable");
            }
        }

      fprintf (dump_file, "\n");
    }

  free (body);
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  If INSIDE_ADDR
   is true, assume we are inside an address.  If TOP_COMPREF is true, assume
   we are at the top-level of the processed address.  */

static tree
strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
                unsigned HOST_WIDE_INT *offset)
{
  tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
  enum tree_code code;
  tree type, orig_type = TREE_TYPE (expr);
  unsigned HOST_WIDE_INT off0, off1, st;
  tree orig_expr = expr;

  STRIP_NOPS (expr);

  type = TREE_TYPE (expr);
  code = TREE_CODE (expr);
  *offset = 0;

  switch (code)
    {
    case INTEGER_CST:
      if (!cst_and_fits_in_hwi (expr)
          || integer_zerop (expr))
        return orig_expr;

      *offset = int_cst_value (expr);
      return build_int_cst (orig_type, 0);

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      op0 = strip_offset_1 (op0, false, false, &off0);
      op1 = strip_offset_1 (op1, false, false, &off1);

      *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
      if (op0 == TREE_OPERAND (expr, 0)
          && op1 == TREE_OPERAND (expr, 1))
        return orig_expr;

      if (integer_zerop (op1))
        expr = op0;
      else if (integer_zerop (op0))
        {
          if (code == MINUS_EXPR)
            expr = fold_build1 (NEGATE_EXPR, type, op1);
          else
            expr = op1;
        }
      else
        expr = fold_build2 (code, type, op0, op1);

      return fold_convert (orig_type, expr);

    case MULT_EXPR:
      op1 = TREE_OPERAND (expr, 1);
      if (!cst_and_fits_in_hwi (op1))
        return orig_expr;

      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, false, false, &off0);
      if (op0 == TREE_OPERAND (expr, 0))
        return orig_expr;

      *offset = off0 * int_cst_value (op1);
      if (integer_zerop (op0))
        expr = op0;
      else
        expr = fold_build2 (MULT_EXPR, type, op0, op1);

      return fold_convert (orig_type, expr);

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      if (!inside_addr)
        return orig_expr;

      step = array_ref_element_size (expr);
      if (!cst_and_fits_in_hwi (step))
        break;

      st = int_cst_value (step);
      op1 = TREE_OPERAND (expr, 1);
      op1 = strip_offset_1 (op1, false, false, &off1);
      *offset = off1 * st;

      if (top_compref
          && integer_zerop (op1))
        {
          /* Strip the component reference completely.  */
          op0 = TREE_OPERAND (expr, 0);
          op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
          *offset += off0;
          return op0;
        }
      break;

    case COMPONENT_REF:
      if (!inside_addr)
        return orig_expr;

      tmp = component_ref_field_offset (expr);
      if (top_compref
          && cst_and_fits_in_hwi (tmp))
        {
          /* Strip the component reference completely.  */
          op0 = TREE_OPERAND (expr, 0);
          op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
          *offset = off0 + int_cst_value (tmp);
          return op0;
        }
      break;

    case ADDR_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, true, true, &off0);
      *offset += off0;

      if (op0 == TREE_OPERAND (expr, 0))
        return orig_expr;

      expr = build_fold_addr_expr (op0);
      return fold_convert (orig_type, expr);

    case MEM_REF:
      /* ??? Offset operand?  */
      inside_addr = false;
      break;

    default:
      return orig_expr;
    }

  /* Default handling of expressions for which we want to recurse into
     the first operand.  */
  op0 = TREE_OPERAND (expr, 0);
  op0 = strip_offset_1 (op0, inside_addr, false, &off0);
  *offset += off0;

  if (op0 == TREE_OPERAND (expr, 0)
      && (!op1 || op1 == TREE_OPERAND (expr, 1)))
    return orig_expr;

  expr = copy_node (expr);
  TREE_OPERAND (expr, 0) = op0;
  if (op1)
    TREE_OPERAND (expr, 1) = op1;

  /* Inside address, we might strip the top level component references,
     thus changing type of the expression.  Handling of ADDR_EXPR
     will fix that.  */
  expr = fold_convert (orig_type, expr);

  return expr;
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  */

static tree
strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
{
  return strip_offset_1 (expr, false, false, offset);
}
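
/* For example (illustration only): for EXPR = &a[5] with 4-byte array
   elements, 20 is stored to *OFFSET and the address of the array itself
   is returned; for EXPR = p + 16 the offset 16 is stripped and p is
   returned.  */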
/* Returns variant of TYPE that can be used as base for different uses.
   We return unsigned type with the same precision, which avoids problems
   with overflows.  */

static tree
generic_type_for (tree type)
{
  if (POINTER_TYPE_P (type))
    return unsigned_type_for (type);

  if (TYPE_UNSIGNED (type))
    return type;

  return unsigned_type_for (type);
}
/* Records invariants in *EXPR_P.  Callback for walk_tree.  DATA contains
   the bitmap to which we should store it.  */

static struct ivopts_data *fd_ivopts_data;
static tree
find_depends (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
{
  bitmap *depends_on = (bitmap *) data;
  struct version_info *info;

  if (TREE_CODE (*expr_p) != SSA_NAME)
    return NULL_TREE;
  info = name_info (fd_ivopts_data, *expr_p);

  if (!info->inv_id || info->has_nonlin_use)
    return NULL_TREE;

  if (!*depends_on)
    *depends_on = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*depends_on, info->inv_id);

  return NULL_TREE;
}
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  If both BASE and STEP are NULL, we add a pseudocandidate for the
   replacement of the final value of the iv by a direct computation.  */

static struct iv_cand *
add_candidate_1 (struct ivopts_data *data,
                 tree base, tree step, bool important, enum iv_position pos,
                 struct iv_use *use, gimple incremented_at)
{
  unsigned i;
  struct iv_cand *cand = NULL;
  tree type, orig_type;

  if (base)
    {
      orig_type = TREE_TYPE (base);
      type = generic_type_for (orig_type);
      if (type != orig_type)
        {
          base = fold_convert (type, base);
          step = fold_convert (type, step);
        }
    }

  for (i = 0; i < n_iv_cands (data); i++)
    {
      cand = iv_cand (data, i);

      if (cand->pos != pos)
        continue;

      if (cand->incremented_at != incremented_at
          || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
              && cand->ainc_use != use))
        continue;

      if (!cand->iv)
        {
          if (!base && !step)
            break;

          continue;
        }

      if (!base && !step)
        continue;

      if (operand_equal_p (base, cand->iv->base, 0)
          && operand_equal_p (step, cand->iv->step, 0)
          && (TYPE_PRECISION (TREE_TYPE (base))
              == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
        break;
    }

  if (i == n_iv_cands (data))
    {
      cand = XCNEW (struct iv_cand);
      cand->id = i;

      if (!base && !step)
        cand->iv = NULL;
      else
        cand->iv = alloc_iv (base, step);

      cand->pos = pos;
      if (pos != IP_ORIGINAL && cand->iv)
        {
          cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
          cand->var_after = cand->var_before;
        }
      cand->important = important;
      cand->incremented_at = incremented_at;
      VEC_safe_push (iv_cand_p, heap, data->iv_candidates, cand);

      if (step
          && TREE_CODE (step) != INTEGER_CST)
        {
          fd_ivopts_data = data;
          walk_tree (&step, find_depends, &cand->depends_on, NULL);
        }

      if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
        cand->ainc_use = use;
      else
        cand->ainc_use = NULL;

      if (dump_file && (dump_flags & TDF_DETAILS))
        dump_cand (dump_file, cand);
    }

  if (important && !cand->important)
    {
      cand->important = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Candidate %d is important\n", cand->id);
    }

  if (use)
    {
      bitmap_set_bit (use->related_cands, i);
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Candidate %d is related to use %d\n",
                 cand->id, use->id);
    }

  return cand;
}
/* Returns true if incrementing the induction variable at the end of the LOOP
   is allowed.

   The purpose is to avoid splitting latch edge with a biv increment, thus
   creating a jump, possibly confusing other optimization passes and leaving
   less freedom to scheduler.  So we allow IP_END_POS only if IP_NORMAL_POS
   is not available (so we do not have a better alternative), or if the latch
   edge is already nonempty.  */

static bool
allow_ip_end_pos_p (struct loop *loop)
{
  if (!ip_normal_pos (loop))
    return true;

  if (!empty_block_p (ip_end_pos (loop)))
    return true;

  return false;
}
/* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
   Important field is set to IMPORTANT.  */

static void
add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
                        bool important, struct iv_use *use)
{
  basic_block use_bb = gimple_bb (use->stmt);
  enum machine_mode mem_mode;
  unsigned HOST_WIDE_INT cstepi;

  /* If we insert the increment in any position other than the standard
     ones, we must ensure that it is incremented once per iteration.
     It must not be in an inner nested loop, or one side of an if
     statement.  */
  if (use_bb->loop_father != data->current_loop
      || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
      || stmt_could_throw_p (use->stmt)
      || !cst_and_fits_in_hwi (step))
    return;

  cstepi = int_cst_value (step);

  mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
  if ((HAVE_PRE_INCREMENT && GET_MODE_SIZE (mem_mode) == cstepi)
      || (HAVE_PRE_DECREMENT && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      enum tree_code code = MINUS_EXPR;
      tree new_base;
      tree new_step = step;

      if (POINTER_TYPE_P (TREE_TYPE (base)))
        {
          new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
          code = POINTER_PLUS_EXPR;
        }
      else
        new_step = fold_convert (TREE_TYPE (base), new_step);
      new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
      add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
                       use->stmt);
    }
  if ((HAVE_POST_INCREMENT && GET_MODE_SIZE (mem_mode) == cstepi)
      || (HAVE_POST_DECREMENT && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
                       use->stmt);
    }
}
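
/* E.g. on a target with post-increment addressing (illustration only), a
   memory use *p whose iv has step 4 and a 4-byte access mode gets an
   IP_AFTER_USE candidate, which later allows the access and the increment
   to be combined into a single "*p++" style instruction.  */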
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  The candidate computation is scheduled on all available positions.  */

static void
add_candidate (struct ivopts_data *data,
               tree base, tree step, bool important, struct iv_use *use)
{
  if (ip_normal_pos (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_NORMAL, use, NULL);
  if (ip_end_pos (data->current_loop)
      && allow_ip_end_pos_p (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_END, use, NULL);

  if (use != NULL && use->type == USE_ADDRESS)
    add_autoinc_candidates (data, base, step, important, use);
}
/* Add a standard "0 + 1 * iteration" iv candidate for a
   type with SIZE bits.  */

static void
add_standard_iv_candidates_for_size (struct ivopts_data *data,
                                     unsigned int size)
{
  tree type = lang_hooks.types.type_for_size (size, true);
  add_candidate (data, build_int_cst (type, 0), build_int_cst (type, 1),
                 true, NULL);
}
/* Adds standard iv candidates.  */

static void
add_standard_iv_candidates (struct ivopts_data *data)
{
  add_standard_iv_candidates_for_size (data, INT_TYPE_SIZE);

  /* The same for a double-integer type if it is still fast enough.  */
  if (BITS_PER_WORD >= INT_TYPE_SIZE * 2)
    add_standard_iv_candidates_for_size (data, INT_TYPE_SIZE * 2);
}
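
/* The standard candidates added above correspond to the plain counters

     iv = 0; ...; iv += 1;

   in the single- and (if cheap) double-word integer types; many uses can
   be rewritten in terms of such a generic counter (illustration only).  */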
/* Adds candidates based on the old induction variable IV.  */

static void
add_old_iv_candidates (struct ivopts_data *data, struct iv *iv)
{
  gimple phi;
  tree def;
  struct iv_cand *cand;

  add_candidate (data, iv->base, iv->step, true, NULL);

  /* The same, but with initial value zero.  */
  if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
    add_candidate (data, size_int (0), iv->step, true, NULL);
  else
    add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
                   iv->step, true, NULL);

  phi = SSA_NAME_DEF_STMT (iv->ssa_name);
  if (gimple_code (phi) == GIMPLE_PHI)
    {
      /* Additionally record the possibility of leaving the original iv
         untouched.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
      cand = add_candidate_1 (data,
                              iv->base, iv->step, true, IP_ORIGINAL, NULL,
                              SSA_NAME_DEF_STMT (def));
      cand->var_before = iv->ssa_name;
      cand->var_after = def;
    }
}
/* Adds candidates based on the old induction variables.  */

static void
add_old_ivs_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv *iv;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      iv = ver_info (data, i)->iv;
      if (iv && iv->biv_p && !integer_zerop (iv->step))
        add_old_iv_candidates (data, iv);
    }
}
/* Adds candidates based on the value of the induction variable IV and USE.  */

static void
add_iv_value_candidates (struct ivopts_data *data,
                         struct iv *iv, struct iv_use *use)
{
  unsigned HOST_WIDE_INT offset;
  tree base;
  tree basetype;

  add_candidate (data, iv->base, iv->step, false, use);

  /* The same, but with initial value zero.  Make such variable important,
     since it is generic enough so that possibly many uses may be based
     on it.  */
  basetype = TREE_TYPE (iv->base);
  if (POINTER_TYPE_P (basetype))
    basetype = sizetype;
  add_candidate (data, build_int_cst (basetype, 0),
                 iv->step, true, use);

  /* Third, try removing the constant offset.  Make sure to even
     add a candidate for &a[0] vs. (T *)&a.  */
  base = strip_offset (iv->base, &offset);
  if (offset
      || base != iv->base)
    add_candidate (data, base, iv->step, false, use);
}
/* Adds candidates based on the uses.  */

static void
add_derived_ivs_candidates (struct ivopts_data *data)
{
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);

      if (!use)
        continue;

      switch (use->type)
        {
        case USE_NONLINEAR_EXPR:
        case USE_COMPARE:
        case USE_ADDRESS:
          /* Just add the ivs based on the value of the iv used here.  */
          add_iv_value_candidates (data, use->iv, use);
          break;

        default:
          gcc_unreachable ();
        }
    }
}
/* Record important candidates and add them to related_cands bitmaps
   if needed.  */

static void
record_important_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv_use *use;

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      if (cand->important)
        bitmap_set_bit (data->important_candidates, i);
    }

  data->consider_all_candidates = (n_iv_cands (data)
                                   <= CONSIDER_ALL_CANDIDATES_BOUND);

  if (data->consider_all_candidates)
    {
      /* We will not need "related_cands" bitmaps in this case,
         so release them to decrease peak memory consumption.  */
      for (i = 0; i < n_iv_uses (data); i++)
        {
          use = iv_use (data, i);
          BITMAP_FREE (use->related_cands);
        }
    }
  else
    {
      /* Add important candidates to the related_cands bitmaps.  */
      for (i = 0; i < n_iv_uses (data); i++)
        bitmap_ior_into (iv_use (data, i)->related_cands,
                         data->important_candidates);
    }
}
/* Allocates the data structure mapping the (use, candidate) pairs to costs.
   If consider_all_candidates is true, we use a two-dimensional array, otherwise
   we allocate a simple list to every use.  */

static void
alloc_use_cost_map (struct ivopts_data *data)
{
  unsigned i, size, s, j;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);
      bitmap_iterator bi;

      if (data->consider_all_candidates)
        size = n_iv_cands (data);
      else
        {
          s = 0;
          EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
            {
              s++;
            }

          /* Round up to the power of two, so that taking the modulo
             by it is fast.  */
          for (size = 1; size < s; size <<= 1)
            continue;
        }

      use->n_map_members = size;
      use->cost_map = XCNEWVEC (struct cost_pair, size);
    }
}
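
/* E.g. if a use has 5 related candidates, the loop above yields
   size == 8, and a candidate id is later mapped to a slot with
   "id & (8 - 1)" -- a single AND instead of a division.  */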
/* Returns description of computation cost of expression whose runtime
   cost is RUNTIME and complexity corresponds to COMPLEXITY.  */

static comp_cost
new_cost (unsigned runtime, unsigned complexity)
{
  comp_cost cost;

  cost.cost = runtime;
  cost.complexity = complexity;

  return cost;
}

/* Adds costs COST1 and COST2.  */

static comp_cost
add_costs (comp_cost cost1, comp_cost cost2)
{
  cost1.cost += cost2.cost;
  cost1.complexity += cost2.complexity;

  return cost1;
}

/* Subtracts costs COST1 and COST2.  */

static comp_cost
sub_costs (comp_cost cost1, comp_cost cost2)
{
  cost1.cost -= cost2.cost;
  cost1.complexity -= cost2.complexity;

  return cost1;
}

/* Returns a negative number if COST1 < COST2, a positive number if
   COST1 > COST2, and 0 if COST1 = COST2.  */

static int
compare_costs (comp_cost cost1, comp_cost cost2)
{
  if (cost1.cost == cost2.cost)
    return cost1.complexity - cost2.complexity;

  return cost1.cost - cost2.cost;
}

/* Returns true if COST is infinite.  */

static bool
infinite_cost_p (comp_cost cost)
{
  return cost.cost == INFTY;
}
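
/* Costs therefore order primarily by the runtime field, with complexity
   as a tie-breaker: {cost 4, complexity 1} beats {5, 0}, while {4, 0}
   beats {4, 1}.  */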
/* Sets cost of (USE, CANDIDATE) pair to COST and record that it depends
   on invariants DEPENDS_ON and that the value used in expressing it
   is VALUE.  */

static void
set_use_iv_cost (struct ivopts_data *data,
                 struct iv_use *use, struct iv_cand *cand,
                 comp_cost cost, bitmap depends_on, tree value,
                 int inv_expr_id)
{
  unsigned i, s;

  if (infinite_cost_p (cost))
    {
      BITMAP_FREE (depends_on);
      return;
    }

  if (data->consider_all_candidates)
    {
      use->cost_map[cand->id].cand = cand;
      use->cost_map[cand->id].cost = cost;
      use->cost_map[cand->id].depends_on = depends_on;
      use->cost_map[cand->id].value = value;
      use->cost_map[cand->id].inv_expr_id = inv_expr_id;
      return;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (use->n_map_members - 1);
  for (i = s; i < use->n_map_members; i++)
    if (!use->cost_map[i].cand)
      goto found;
  for (i = 0; i < s; i++)
    if (!use->cost_map[i].cand)
      goto found;

  gcc_unreachable ();

found:
  use->cost_map[i].cand = cand;
  use->cost_map[i].cost = cost;
  use->cost_map[i].depends_on = depends_on;
  use->cost_map[i].value = value;
  use->cost_map[i].inv_expr_id = inv_expr_id;
}
/* Gets cost of (USE, CANDIDATE) pair.  */

static struct cost_pair *
get_use_iv_cost (struct ivopts_data *data, struct iv_use *use,
                 struct iv_cand *cand)
{
  unsigned i, s;
  struct cost_pair *ret;

  if (!cand)
    return NULL;

  if (data->consider_all_candidates)
    {
      ret = use->cost_map + cand->id;
      if (!ret->cand)
        return NULL;

      return ret;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (use->n_map_members - 1);
  for (i = s; i < use->n_map_members; i++)
    if (use->cost_map[i].cand == cand)
      return use->cost_map + i;

  for (i = 0; i < s; i++)
    if (use->cost_map[i].cand == cand)
      return use->cost_map + i;

  return NULL;
}
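
/* The cost_map filled and queried above is a small open-addressed hash
   table: probing starts at "cand->id & (n_map_members - 1)" and scans
   linearly, wrapping around once, so a lookup touches each slot at most
   one time.  Only pairs with finite cost are ever stored.  */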
/* Returns estimate on cost of computing SEQ.  */

static unsigned
seq_cost (rtx seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += rtx_cost (SET_SRC (set), SET, speed);
      else
        cost++;
    }

  return cost;
}
/* Produce DECL_RTL for object obj so it looks like it is stored in memory.  */

static rtx
produce_memory_decl_rtl (tree obj, int *regno)
{
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  rtx x;

  gcc_assert (obj);
  if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
    {
      const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
      x = gen_rtx_SYMBOL_REF (address_mode, name);
      SET_SYMBOL_REF_DECL (x, obj);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
      targetm.encode_section_info (obj, x, true);
    }
  else
    {
      x = gen_raw_REG (address_mode, (*regno)++);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
    }

  return x;
}
/* Prepares decl_rtl for variables referred in *EXPR_P.  Callback for
   walk_tree.  DATA contains the actual fake register number.  */

static tree
prepare_decl_rtl (tree *expr_p, int *ws, void *data)
{
  tree obj = NULL_TREE;
  rtx x = NULL_RTX;
  int *regno = (int *) data;

  switch (TREE_CODE (*expr_p))
    {
    case ADDR_EXPR:
      for (expr_p = &TREE_OPERAND (*expr_p, 0);
           handled_component_p (*expr_p);
           expr_p = &TREE_OPERAND (*expr_p, 0))
        continue;
      obj = *expr_p;
      if (DECL_P (obj) && !DECL_RTL_SET_P (obj))
        x = produce_memory_decl_rtl (obj, regno);
      break;

    case SSA_NAME:
      *ws = 0;
      obj = SSA_NAME_VAR (*expr_p);
      if (!DECL_RTL_SET_P (obj))
        x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
      break;

    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *ws = 0;
      obj = *expr_p;

      if (DECL_RTL_SET_P (obj))
        break;

      if (DECL_MODE (obj) == BLKmode)
        x = produce_memory_decl_rtl (obj, regno);
      else
        x = gen_raw_REG (DECL_MODE (obj), (*regno)++);

      break;

    default:
      break;
    }

  if (x)
    {
      VEC_safe_push (tree, heap, decl_rtl_to_reset, obj);
      SET_DECL_RTL (obj, x);
    }

  return NULL_TREE;
}
/* Determines cost of the computation of EXPR.  */

static unsigned
computation_cost (tree expr, bool speed)
{
  rtx seq, rslt;
  tree type = TREE_TYPE (expr);
  unsigned cost;
  /* Avoid using hard regs in ways which may be unsupported.  */
  int regno = LAST_VIRTUAL_REGISTER + 1;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  enum node_frequency real_frequency = node->frequency;

  node->frequency = NODE_FREQUENCY_NORMAL;
  crtl->maybe_hot_insn_p = speed;
  walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
  start_sequence ();
  rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
  seq = get_insns ();
  end_sequence ();
  default_rtl_profile ();
  node->frequency = real_frequency;

  cost = seq_cost (seq, speed);
  if (MEM_P (rslt))
    cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
                          TYPE_ADDR_SPACE (type), speed);
  else if (!REG_P (rslt))
    cost += rtx_cost (rslt, SET, speed);

  return cost;
}
/* Returns variable containing the value of candidate CAND at statement AT.  */

static tree
var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple stmt)
{
  if (stmt_after_increment (loop, cand, stmt))
    return cand->var_after;
  else
    return cand->var_before;
}
/* Return the most significant (sign) bit of T.  Similar to tree_int_cst_msb,
   but the bit is determined from TYPE_PRECISION, not MODE_BITSIZE.  */

int
tree_int_cst_sign_bit (const_tree t)
{
  unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
  unsigned HOST_WIDE_INT w;

  if (bitno < HOST_BITS_PER_WIDE_INT)
    w = TREE_INT_CST_LOW (t);
  else
    {
      w = TREE_INT_CST_HIGH (t);
      bitno -= HOST_BITS_PER_WIDE_INT;
    }

  return (w >> bitno) & 1;
}
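
/* E.g. for a type of precision 12 held in a 16-bit mode, the value 0x800
   has its sign bit set according to this function, while a check based
   on the mode's bitsize would test bit 15 instead.  */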
/* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
   same precision that is at least as wide as the precision of TYPE, stores
   BA to A and BB to B, and returns the type of BA.  Otherwise, returns the
   type of A and B.  */

static tree
determine_common_wider_type (tree *a, tree *b)
{
  tree wider_type = NULL;
  tree suba, subb;
  tree atype = TREE_TYPE (*a);

  if (CONVERT_EXPR_P (*a))
    {
      suba = TREE_OPERAND (*a, 0);
      wider_type = TREE_TYPE (suba);
      if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
        return atype;
    }
  else
    return atype;

  if (CONVERT_EXPR_P (*b))
    {
      subb = TREE_OPERAND (*b, 0);
      if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
        return atype;
    }
  else
    return atype;

  *a = suba;
  *b = subb;
  return wider_type;
}
/* Determines the expression by that USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in a decomposed
   form into AFF.  Returns false if USE cannot be expressed using CAND.  */

static bool
get_computation_aff (struct loop *loop,
                     struct iv_use *use, struct iv_cand *cand, gimple at,
                     struct affine_tree_combination *aff)
{
  tree ubase = use->iv->base;
  tree ustep = use->iv->step;
  tree cbase = cand->iv->base;
  tree cstep = cand->iv->step, cstep_common;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
  tree common_type, var;
  tree uutype;
  aff_tree cbase_aff, var_aff;
  double_int rat;

  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    {
      /* We do not have a precision to express the values of use.  */
      return false;
    }

  var = var_at_stmt (loop, cand, at);
  uutype = unsigned_type_for (utype);

  /* If the conversion is not noop, perform it.  */
  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      cstep = fold_convert (uutype, cstep);
      cbase = fold_convert (uutype, cbase);
      var = fold_convert (uutype, var);
    }

  if (!constant_multiple_of (ustep, cstep, &rat))
    return false;

  /* In case both UBASE and CBASE are shortened to UUTYPE from some common
     type, we achieve better folding by computing their difference in this
     wider type, and cast the result to UUTYPE.  We do not need to worry about
     overflows, as all the arithmetics will in the end be performed in UUTYPE
     anyway.  */
  common_type = determine_common_wider_type (&ubase, &cbase);

  /* use = ubase - ratio * cbase + ratio * var.  */
  tree_to_aff_combination (ubase, common_type, aff);
  tree_to_aff_combination (cbase, common_type, &cbase_aff);
  tree_to_aff_combination (var, uutype, &var_aff);

  /* We need to shift the value if we are after the increment.  */
  if (stmt_after_increment (loop, cand, at))
    {
      aff_tree cstep_aff;

      if (common_type != uutype)
        cstep_common = fold_convert (common_type, cstep);
      else
        cstep_common = cstep;

      tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
      aff_combination_add (&cbase_aff, &cstep_aff);
    }

  aff_combination_scale (&cbase_aff, double_int_neg (rat));
  aff_combination_add (aff, &cbase_aff);
  if (common_type != uutype)
    aff_combination_convert (aff, uutype);

  aff_combination_scale (&var_aff, rat);
  aff_combination_add (aff, &var_aff);

  return true;
}
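
/* Worked example: a use with base B and step 4, expressed from a
   candidate with base 0 and step 4, gives ratio == 1, so the decomposed
   result is "B - 1 * 0 + 1 * var", i.e. the use becomes "B + var".
   (Illustrative values; the arithmetic is carried out in the unsigned
   variant of the use's type to avoid overflow issues.)  */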
/* Determines the expression by that USE is expressed from induction variable
   CAND at statement AT in LOOP.  The computation is unshared.  */

static tree
get_computation_at (struct loop *loop,
                    struct iv_use *use, struct iv_cand *cand, gimple at)
{
  aff_tree aff;
  tree type = TREE_TYPE (use->iv->base);

  if (!get_computation_aff (loop, use, cand, at, &aff))
    return NULL_TREE;
  unshare_aff_combination (&aff);
  return fold_convert (type, aff_combination_to_tree (&aff));
}

/* Determines the expression by that USE is expressed from induction variable
   CAND in LOOP.  The computation is unshared.  */

static tree
get_computation (struct loop *loop, struct iv_use *use, struct iv_cand *cand)
{
  return get_computation_at (loop, use, cand, use->stmt);
}
/* Adjust the cost COST for being in loop setup rather than loop body.
   If we're optimizing for space, the loop setup overhead is constant;
   if we're optimizing for speed, amortize it over the per-iteration cost.  */

static unsigned
adjust_setup_cost (struct ivopts_data *data, unsigned cost)
{
  if (cost == INFTY)
    return cost;
  else if (optimize_loop_for_speed_p (data->current_loop))
    return cost / avg_loop_niter (data->current_loop);
  else
    return cost;
}
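
/* E.g. a setup cost of 20 in a speed-optimized loop with the default
   iteration estimate of AVG_LOOP_NITER (5) is accounted as 20 / 5 == 4
   per iteration; when optimizing for size the full 20 is kept.  */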
/* Returns cost of addition in MODE.  */

static unsigned
add_cost (enum machine_mode mode, bool speed)
{
  static unsigned costs[NUM_MACHINE_MODES];
  rtx seq;
  unsigned cost;

  if (costs[mode])
    return costs[mode];

  start_sequence ();
  force_operand (gen_rtx_fmt_ee (PLUS, mode,
                                 gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1),
                                 gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 2)),
                 NULL_RTX);
  seq = get_insns ();
  end_sequence ();

  cost = seq_cost (seq, speed);
  if (!cost)
    cost = 1;

  costs[mode] = cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Addition in %s costs %d\n",
             GET_MODE_NAME (mode), cost);
  return cost;
}
/* Entry in a hashtable of already known costs for multiplication.  */
struct mbc_entry
{
  HOST_WIDE_INT cst;            /* The constant to multiply by.  */
  enum machine_mode mode;       /* In mode.  */
  unsigned cost;                /* The cost.  */
};

/* Counts hash value for the ENTRY.  */

static hashval_t
mbc_entry_hash (const void *entry)
{
  const struct mbc_entry *e = (const struct mbc_entry *) entry;

  return 57 * (hashval_t) e->mode + (hashval_t) (e->cst % 877);
}

/* Compares the hash table entries ENTRY1 and ENTRY2.  */

static int
mbc_entry_eq (const void *entry1, const void *entry2)
{
  const struct mbc_entry *e1 = (const struct mbc_entry *) entry1;
  const struct mbc_entry *e2 = (const struct mbc_entry *) entry2;

  return (e1->mode == e2->mode
          && e1->cst == e2->cst);
}
/* Returns cost of multiplication by constant CST in MODE.  */

static unsigned
multiply_by_cost (HOST_WIDE_INT cst, enum machine_mode mode, bool speed)
{
  static htab_t costs;
  struct mbc_entry **cached, act;
  rtx seq;
  unsigned cost;

  if (!costs)
    costs = htab_create (100, mbc_entry_hash, mbc_entry_eq, free);

  act.mode = mode;
  act.cst = cst;
  cached = (struct mbc_entry **) htab_find_slot (costs, &act, INSERT);
  if (*cached)
    return (*cached)->cost;

  *cached = XNEW (struct mbc_entry);
  (*cached)->mode = mode;
  (*cached)->cst = cst;

  start_sequence ();
  expand_mult (mode, gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1),
               gen_int_mode (cst, mode), NULL_RTX, 0);
  seq = get_insns ();
  end_sequence ();

  cost = seq_cost (seq, speed);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Multiplication by %d in %s costs %d\n",
             (int) cst, GET_MODE_NAME (mode), cost);

  (*cached)->cost = cost;

  return cost;
}
/* Returns true if multiplying by RATIO is allowed in an address.  Test the
   validity for a memory reference accessing memory of mode MODE in
   address space AS.  */

DEF_VEC_P (sbitmap);
DEF_VEC_ALLOC_P (sbitmap, heap);

static bool
multiplier_allowed_in_address_p (HOST_WIDE_INT ratio, enum machine_mode mode,
                                 addr_space_t as)
{
#define MAX_RATIO 128
  unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mode;
  static VEC (sbitmap, heap) *valid_mult_list;
  sbitmap valid_mult;

  if (data_index >= VEC_length (sbitmap, valid_mult_list))
    VEC_safe_grow_cleared (sbitmap, heap, valid_mult_list, data_index + 1);

  valid_mult = VEC_index (sbitmap, valid_mult_list, data_index);
  if (!valid_mult)
    {
      enum machine_mode address_mode = targetm.addr_space.address_mode (as);
      rtx reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
      rtx addr;
      HOST_WIDE_INT i;

      valid_mult = sbitmap_alloc (2 * MAX_RATIO + 1);
      sbitmap_zero (valid_mult);
      addr = gen_rtx_fmt_ee (MULT, address_mode, reg1, NULL_RTX);
      for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
        {
          XEXP (addr, 1) = gen_int_mode (i, address_mode);
          if (memory_address_addr_space_p (mode, addr, as))
            SET_BIT (valid_mult, i + MAX_RATIO);
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  allowed multipliers:");
          for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
            if (TEST_BIT (valid_mult, i + MAX_RATIO))
              fprintf (dump_file, " %d", (int) i);
          fprintf (dump_file, "\n");
          fprintf (dump_file, "\n");
        }

      VEC_replace (sbitmap, valid_mult_list, data_index, valid_mult);
    }

  if (ratio > MAX_RATIO || ratio < -MAX_RATIO)
    return false;

  return TEST_BIT (valid_mult, ratio + MAX_RATIO);
}
/* Returns cost of address in shape symbol + var + OFFSET + RATIO * index.
   If SYMBOL_PRESENT is false, symbol is omitted.  If VAR_PRESENT is false,
   variable is omitted.  Compute the cost for a memory reference that accesses
   a memory location of mode MEM_MODE in address space AS.

   MAY_AUTOINC is set to true if the autoincrement (increasing index by
   size of MEM_MODE / RATIO) is available.  To make this determination, we
   look at the size of the increment to be made, which is given in CSTEP.
   CSTEP may be zero if the step is unknown.
   STMT_AFTER_INC is true iff the statement we're looking at is after the
   increment of the original biv.

   TODO -- there must be some better way.  This all is quite crude.  */

typedef struct
{
  HOST_WIDE_INT min_offset, max_offset;
  unsigned costs[2][2][2][2];
} *address_cost_data;

DEF_VEC_P (address_cost_data);
DEF_VEC_ALLOC_P (address_cost_data, heap);

static comp_cost
get_address_cost (bool symbol_present, bool var_present,
                  unsigned HOST_WIDE_INT offset, HOST_WIDE_INT ratio,
                  HOST_WIDE_INT cstep, enum machine_mode mem_mode,
                  addr_space_t as, bool speed,
                  bool stmt_after_inc, bool *may_autoinc)
{
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  static VEC(address_cost_data, heap) *address_cost_data_list;
  unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mem_mode;
  address_cost_data data;
  static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
  static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
  unsigned cost, acost, complexity;
  bool offset_p, ratio_p, autoinc;
  HOST_WIDE_INT s_offset, autoinc_offset, msize;
  unsigned HOST_WIDE_INT mask;
  unsigned bits;

  if (data_index >= VEC_length (address_cost_data, address_cost_data_list))
    VEC_safe_grow_cleared (address_cost_data, heap, address_cost_data_list,
                           data_index + 1);

  data = VEC_index (address_cost_data, address_cost_data_list, data_index);
  if (!data)
    {
      HOST_WIDE_INT i;
      HOST_WIDE_INT rat, off = 0;
      int old_cse_not_expected, width;
      unsigned sym_p, var_p, off_p, rat_p, add_c;
      rtx seq, addr, base;
      rtx reg0, reg1;

      data = (address_cost_data) xcalloc (1, sizeof (*data));

      reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);

      width = GET_MODE_BITSIZE (address_mode) - 1;
      if (width > (HOST_BITS_PER_WIDE_INT - 1))
        width = HOST_BITS_PER_WIDE_INT - 1;
      addr = gen_rtx_fmt_ee (PLUS, address_mode, reg1, NULL_RTX);

      for (i = width; i >= 0; i--)
        {
          off = -((HOST_WIDE_INT) 1 << i);
          XEXP (addr, 1) = gen_int_mode (off, address_mode);
          if (memory_address_addr_space_p (mem_mode, addr, as))
            break;
        }
      data->min_offset = (i == -1? 0 : off);

      for (i = width; i >= 0; i--)
        {
          off = ((HOST_WIDE_INT) 1 << i) - 1;
          XEXP (addr, 1) = gen_int_mode (off, address_mode);
          if (memory_address_addr_space_p (mem_mode, addr, as))
            break;
        }
      if (i == -1)
        off = 0;
      data->max_offset = off;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "get_address_cost:\n");
          fprintf (dump_file, "  min offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
                   GET_MODE_NAME (mem_mode),
                   data->min_offset);
          fprintf (dump_file, "  max offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
                   GET_MODE_NAME (mem_mode),
                   data->max_offset);
        }

      rat = 1;
      for (i = 2; i <= MAX_RATIO; i++)
        if (multiplier_allowed_in_address_p (i, mem_mode, as))
          {
            rat = i;
            break;
          }

      /* Compute the cost of various addressing modes.  */
      acost = 0;
      reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
      reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);

      if (HAVE_PRE_DECREMENT)
        {
          addr = gen_rtx_PRE_DEC (address_mode, reg0);
          has_predec[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (HAVE_POST_DECREMENT)
        {
          addr = gen_rtx_POST_DEC (address_mode, reg0);
          has_postdec[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (HAVE_PRE_INCREMENT)
        {
          addr = gen_rtx_PRE_INC (address_mode, reg0);
          has_preinc[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (HAVE_POST_INCREMENT)
        {
          addr = gen_rtx_POST_INC (address_mode, reg0);
          has_postinc[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      for (i = 0; i < 16; i++)
        {
          sym_p = i & 1;
          var_p = (i >> 1) & 1;
          off_p = (i >> 2) & 1;
          rat_p = (i >> 3) & 1;

          addr = reg0;
          if (rat_p)
            addr = gen_rtx_fmt_ee (MULT, address_mode, addr,
                                   gen_int_mode (rat, address_mode));

          if (var_p)
            addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, reg1);

          if (sym_p)
            {
              base = gen_rtx_SYMBOL_REF (address_mode, ggc_strdup (""));
              /* ??? We can run into trouble with some backends by presenting
                 it with symbols which haven't been properly passed through
                 targetm.encode_section_info.  By setting the local bit, we
                 enhance the probability of things working.  */
              SYMBOL_REF_FLAGS (base) = SYMBOL_FLAG_LOCAL;

              if (off_p)
                base = gen_rtx_fmt_e (CONST, address_mode,
                                      gen_rtx_fmt_ee
                                        (PLUS, address_mode, base,
                                         gen_int_mode (off, address_mode)));
            }
          else if (off_p)
            base = gen_int_mode (off, address_mode);
          else
            base = NULL_RTX;

          if (base)
            addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, base);

          start_sequence ();
          /* To avoid splitting addressing modes, pretend that no cse will
             follow.  */
          old_cse_not_expected = cse_not_expected;
          cse_not_expected = true;
          addr = memory_address_addr_space (mem_mode, addr, as);
          cse_not_expected = old_cse_not_expected;
          seq = get_insns ();
          end_sequence ();

          acost = seq_cost (seq, speed);
          acost += address_cost (addr, mem_mode, as, speed);

          if (!acost)
            acost = 1;
          data->costs[sym_p][var_p][off_p][rat_p] = acost;
        }

      /* On some targets, it is quite expensive to load symbol to a register,
         which makes addresses that contain symbols look much more expensive.
         However, the symbol will have to be loaded in any case before the
         loop (and quite likely we have it in register already), so it does not
         make much sense to penalize them too heavily.  So make some final
         tweaks for the SYMBOL_PRESENT modes:

         If VAR_PRESENT is false, and the mode obtained by changing symbol to
         var is cheaper, use this mode with small penalty.
         If VAR_PRESENT is true, try whether the mode with
         SYMBOL_PRESENT = false is cheaper even with cost of addition, and
         if this is the case, use it.  */
      add_c = add_cost (address_mode, speed);
      for (i = 0; i < 8; i++)
        {
          var_p = i & 1;
          off_p = (i >> 1) & 1;
          rat_p = (i >> 2) & 1;

          acost = data->costs[0][1][off_p][rat_p] + 1;
          if (var_p)
            acost += add_c;

          if (acost < data->costs[1][var_p][off_p][rat_p])
            data->costs[1][var_p][off_p][rat_p] = acost;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Address costs:\n");

          for (i = 0; i < 16; i++)
            {
              sym_p = i & 1;
              var_p = (i >> 1) & 1;
              off_p = (i >> 2) & 1;
              rat_p = (i >> 3) & 1;

              fprintf (dump_file, "  ");
              if (sym_p)
                fprintf (dump_file, "sym + ");
              if (var_p)
                fprintf (dump_file, "var + ");
              if (off_p)
                fprintf (dump_file, "cst + ");
              if (rat_p)
                fprintf (dump_file, "rat * ");

              acost = data->costs[sym_p][var_p][off_p][rat_p];
              fprintf (dump_file, "index costs %d\n", acost);
            }
          if (has_predec[mem_mode] || has_postdec[mem_mode]
              || has_preinc[mem_mode] || has_postinc[mem_mode])
            fprintf (dump_file, "  May include autoinc/dec\n");
          fprintf (dump_file, "\n");
        }

      VEC_replace (address_cost_data, address_cost_data_list,
                   data_index, data);
    }

  bits = GET_MODE_BITSIZE (address_mode);
  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
  offset &= mask;
  if ((offset >> (bits - 1) & 1))
    offset |= ~mask;
  s_offset = offset;

  autoinc = false;
  msize = GET_MODE_SIZE (mem_mode);
  autoinc_offset = offset;
  if (stmt_after_inc)
    autoinc_offset += ratio * cstep;
  if (symbol_present || var_present || ratio != 1)
    autoinc = false;
  else if ((has_postinc[mem_mode] && autoinc_offset == 0
            && msize == cstep)
           || (has_postdec[mem_mode] && autoinc_offset == 0
               && msize == -cstep)
           || (has_preinc[mem_mode] && autoinc_offset == msize
               && msize == cstep)
           || (has_predec[mem_mode] && autoinc_offset == -msize
               && msize == -cstep))
    autoinc = true;

  cost = 0;
  offset_p = (s_offset != 0
              && data->min_offset <= s_offset
              && s_offset <= data->max_offset);
  ratio_p = (ratio != 1
             && multiplier_allowed_in_address_p (ratio, mem_mode, as));

  if (ratio != 1 && !ratio_p)
    cost += multiply_by_cost (ratio, address_mode, speed);

  if (s_offset && !offset_p && !symbol_present)
    cost += add_cost (address_mode, speed);

  if (may_autoinc)
    *may_autoinc = autoinc;
  acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
  complexity = (symbol_present != 0) + (var_present != 0) + offset_p + ratio_p;
  return new_cost (cost + acost, complexity);
}
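
/* Example of a query: for a reference of the form
   *(sym + var + 8 + 4 * index), s_offset is 8 and, if the target
   accepts both the offset and the scale, the result is
   data->costs[1][1][1][1] with complexity 4.  If scaling by 4 is not
   allowed in addresses, ratio_p stays false, the multiplication cost is
   added separately, and the cheaper entry without a ratio is used.  */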
/* Calculate the SPEED or size cost of shiftadd EXPR in MODE.  MULT is the
   EXPR operand holding the shift.  COST0 and COST1 are the costs for
   calculating the operands of EXPR.  Returns true if successful, and returns
   the cost in COST.  */

static bool
get_shiftadd_cost (tree expr, enum machine_mode mode, comp_cost cost0,
                   comp_cost cost1, tree mult, bool speed, comp_cost *cost)
{
  comp_cost res;
  tree op1 = TREE_OPERAND (expr, 1);
  tree cst = TREE_OPERAND (mult, 1);
  tree multop = TREE_OPERAND (mult, 0);
  int m = exact_log2 (int_cst_value (cst));
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int sa_cost;

  if (!(m >= 0 && m < maxm))
    return false;

  sa_cost = (TREE_CODE (expr) != MINUS_EXPR
             ? shiftadd_cost[speed][mode][m]
             : (mult == op1
                ? shiftsub1_cost[speed][mode][m]
                : shiftsub0_cost[speed][mode][m]));
  res = new_cost (sa_cost, 0);
  res = add_costs (res, mult == op1 ? cost0 : cost1);

  STRIP_NOPS (multop);
  if (!is_gimple_val (multop))
    res = add_costs (res, force_expr_to_var_cost (multop, speed));

  *cost = res;
  return true;
}
/* Estimates cost of forcing expression EXPR into a variable.  */

static comp_cost
force_expr_to_var_cost (tree expr, bool speed)
{
  static bool costs_initialized = false;
  static unsigned integer_cost [2];
  static unsigned symbol_cost [2];
  static unsigned address_cost [2];
  tree op0, op1;
  comp_cost cost0, cost1, cost;
  enum machine_mode mode;

  if (!costs_initialized)
    {
      tree type = build_pointer_type (integer_type_node);
      tree var, addr;
      rtx x;
      int i;

      var = create_tmp_var_raw (integer_type_node, "test_var");
      TREE_STATIC (var) = 1;
      x = produce_memory_decl_rtl (var, NULL);
      SET_DECL_RTL (var, x);

      addr = build1 (ADDR_EXPR, type, var);

      for (i = 0; i < 2; i++)
        {
          integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
                                                             2000), i);

          symbol_cost[i] = computation_cost (addr, i) + 1;

          address_cost[i]
            = computation_cost (build2 (POINTER_PLUS_EXPR, type,
                                        addr,
                                        build_int_cst (sizetype, 2000)), i) + 1;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
              fprintf (dump_file, "  integer %d\n", (int) integer_cost[i]);
              fprintf (dump_file, "  symbol %d\n", (int) symbol_cost[i]);
              fprintf (dump_file, "  address %d\n", (int) address_cost[i]);
              fprintf (dump_file, "  other %d\n", (int) target_spill_cost[i]);
              fprintf (dump_file, "\n");
            }
        }

      costs_initialized = true;
    }

  STRIP_NOPS (expr);

  if (SSA_VAR_P (expr))
    return zero_cost;

  if (is_gimple_min_invariant (expr))
    {
      if (TREE_CODE (expr) == INTEGER_CST)
        return new_cost (integer_cost [speed], 0);

      if (TREE_CODE (expr) == ADDR_EXPR)
        {
          tree obj = TREE_OPERAND (expr, 0);

          if (TREE_CODE (obj) == VAR_DECL
              || TREE_CODE (obj) == PARM_DECL
              || TREE_CODE (obj) == RESULT_DECL)
            return new_cost (symbol_cost [speed], 0);
        }

      return new_cost (address_cost [speed], 0);
    }

  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);
      STRIP_NOPS (op0);
      STRIP_NOPS (op1);

      if (is_gimple_val (op0))
        cost0 = zero_cost;
      else
        cost0 = force_expr_to_var_cost (op0, speed);

      if (is_gimple_val (op1))
        cost1 = zero_cost;
      else
        cost1 = force_expr_to_var_cost (op1, speed);

      break;

    case NEGATE_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      STRIP_NOPS (op0);
      op1 = NULL_TREE;

      if (is_gimple_val (op0))
        cost0 = zero_cost;
      else
        cost0 = force_expr_to_var_cost (op0, speed);

      cost1 = zero_cost;
      break;

    default:
      /* Just an arbitrary value, FIXME.  */
      return new_cost (target_spill_cost[speed], 0);
    }

  mode = TYPE_MODE (TREE_TYPE (expr));
  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      cost = new_cost (add_cost (mode, speed), 0);
      if (TREE_CODE (expr) != NEGATE_EXPR)
        {
          tree mult = NULL_TREE;
          comp_cost sa_cost;
          if (TREE_CODE (op1) == MULT_EXPR)
            mult = op1;
          else if (TREE_CODE (op0) == MULT_EXPR)
            mult = op0;

          if (mult != NULL_TREE
              && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
              && get_shiftadd_cost (expr, mode, cost0, cost1, mult, speed,
                                    &sa_cost))
            return sa_cost;
        }
      break;

    case MULT_EXPR:
      if (cst_and_fits_in_hwi (op0))
        cost = new_cost (multiply_by_cost (int_cst_value (op0), mode, speed), 0);
      else if (cst_and_fits_in_hwi (op1))
        cost = new_cost (multiply_by_cost (int_cst_value (op1), mode, speed), 0);
      else
        return new_cost (target_spill_cost [speed], 0);
      break;

    default:
      gcc_unreachable ();
    }

  cost = add_costs (cost, cost0);
  cost = add_costs (cost, cost1);

  /* Bound the cost by target_spill_cost.  The parts of complicated
     computations often are either loop invariant or at least can
     be shared between several iv uses, so letting this grow without
     limits would not give reasonable results.  */
  if (cost.cost > (int) target_spill_cost [speed])
    cost.cost = target_spill_cost [speed];

  return cost;
}
/* Estimates cost of forcing EXPR into a variable.  DEPENDS_ON is a set of the
   invariants the computation depends on.  */

static comp_cost
force_var_cost (struct ivopts_data *data,
                tree expr, bitmap *depends_on)
{
  if (depends_on)
    {
      fd_ivopts_data = data;
      walk_tree (&expr, find_depends, depends_on, NULL);
    }

  return force_expr_to_var_cost (expr, data->speed);
}
/* Estimates cost of expressing address ADDR as var + symbol + offset.  The
   value of offset is added to OFFSET, SYMBOL_PRESENT and VAR_PRESENT are set
   to false if the corresponding part is missing.  DEPENDS_ON is a set of the
   invariants the computation depends on.  */

static comp_cost
split_address_cost (struct ivopts_data *data,
                    tree addr, bool *symbol_present, bool *var_present,
                    unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  tree core;
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  tree toffset;
  enum machine_mode mode;
  int unsignedp, volatilep;

  core = get_inner_reference (addr, &bitsize, &bitpos, &toffset, &mode,
                              &unsignedp, &volatilep, false);

  if (toffset != 0
      || bitpos % BITS_PER_UNIT != 0
      || TREE_CODE (core) != VAR_DECL)
    {
      *symbol_present = false;
      *var_present = true;
      fd_ivopts_data = data;
      walk_tree (&addr, find_depends, depends_on, NULL);
      return new_cost (target_spill_cost[data->speed], 0);
    }

  *offset += bitpos / BITS_PER_UNIT;
  if (TREE_STATIC (core)
      || DECL_EXTERNAL (core))
    {
      *symbol_present = true;
      *var_present = false;
      return zero_cost;
    }

  *symbol_present = false;
  *var_present = true;
  return zero_cost;
}
/* Estimates cost of expressing difference of addresses E1 - E2 as
   var + symbol + offset.  The value of offset is added to OFFSET,
   SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
   part is missing.  DEPENDS_ON is a set of the invariants the computation
   depends on.  */

static comp_cost
ptr_difference_cost (struct ivopts_data *data,
                     tree e1, tree e2, bool *symbol_present, bool *var_present,
                     unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  HOST_WIDE_INT diff = 0;
  aff_tree aff_e1, aff_e2;
  tree type;

  gcc_assert (TREE_CODE (e1) == ADDR_EXPR);

  if (ptr_difference_const (e1, e2, &diff))
    {
      *offset += diff;
      *symbol_present = false;
      *var_present = false;
      return zero_cost;
    }

  if (integer_zerop (e2))
    return split_address_cost (data, TREE_OPERAND (e1, 0),
                               symbol_present, var_present, offset, depends_on);

  *symbol_present = false;
  *var_present = true;

  type = signed_type_for (TREE_TYPE (e1));
  tree_to_aff_combination (e1, type, &aff_e1);
  tree_to_aff_combination (e2, type, &aff_e2);
  aff_combination_scale (&aff_e2, double_int_minus_one);
  aff_combination_add (&aff_e1, &aff_e2);

  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
/* Estimates cost of expressing difference E1 - E2 as
   var + symbol + offset.  The value of offset is added to OFFSET,
   SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
   part is missing.  DEPENDS_ON is a set of the invariants the computation
   depends on.  */

static comp_cost
difference_cost (struct ivopts_data *data,
                 tree e1, tree e2, bool *symbol_present, bool *var_present,
                 unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (e1));
  unsigned HOST_WIDE_INT off1, off2;
  aff_tree aff_e1, aff_e2;
  tree type;

  e1 = strip_offset (e1, &off1);
  e2 = strip_offset (e2, &off2);
  *offset += off1 - off2;

  STRIP_NOPS (e1);
  STRIP_NOPS (e2);

  if (TREE_CODE (e1) == ADDR_EXPR)
    return ptr_difference_cost (data, e1, e2, symbol_present, var_present,
                                offset, depends_on);
  *symbol_present = false;

  if (operand_equal_p (e1, e2, 0))
    {
      *var_present = false;
      return zero_cost;
    }

  *var_present = true;

  if (integer_zerop (e2))
    return force_var_cost (data, e1, depends_on);

  if (integer_zerop (e1))
    {
      comp_cost cost = force_var_cost (data, e2, depends_on);
      cost.cost += multiply_by_cost (-1, mode, data->speed);
      return cost;
    }

  type = signed_type_for (TREE_TYPE (e1));
  tree_to_aff_combination (e1, type, &aff_e1);
  tree_to_aff_combination (e2, type, &aff_e2);
  aff_combination_scale (&aff_e2, double_int_minus_one);
  aff_combination_add (&aff_e1, &aff_e2);

  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
/* Returns true if AFF1 and AFF2 are identical.  */

static bool
compare_aff_trees (aff_tree *aff1, aff_tree *aff2)
{
  unsigned i;

  if (aff1->n != aff2->n)
    return false;

  for (i = 0; i < aff1->n; i++)
    {
      if (double_int_cmp (aff1->elts[i].coef, aff2->elts[i].coef, 0) != 0)
        return false;

      if (!operand_equal_p (aff1->elts[i].val, aff2->elts[i].val, 0))
        return false;
    }
  return true;
}
/* Stores EXPR in DATA->inv_expr_tab, and assigns it an inv_expr_id.  */

static int
get_expr_id (struct ivopts_data *data, tree expr)
{
  struct iv_inv_expr_ent ent;
  struct iv_inv_expr_ent **slot;

  ent.expr = expr;
  ent.hash = iterative_hash_expr (expr, 0);
  slot = (struct iv_inv_expr_ent **) htab_find_slot (data->inv_expr_tab,
                                                     &ent, INSERT);
  if (*slot)
    return (*slot)->id;

  *slot = XNEW (struct iv_inv_expr_ent);
  (*slot)->expr = expr;
  (*slot)->hash = ent.hash;
  (*slot)->id = data->inv_expr_id++;
  return (*slot)->id;
}
/* Returns the pseudo expr id if expression UBASE - RATIO * CBASE
   requires a new compiler generated temporary.  Returns -1 otherwise.
   ADDRESS_P is a flag indicating if the expression is for address
   computation.  */

static int
get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase,
                            tree cbase, HOST_WIDE_INT ratio,
                            bool address_p)
{
  aff_tree ubase_aff, cbase_aff;
  tree expr, ub, cb;

  STRIP_NOPS (ubase);
  STRIP_NOPS (cbase);
  ub = ubase;
  cb = cbase;

  if ((TREE_CODE (ubase) == INTEGER_CST)
      && (TREE_CODE (cbase) == INTEGER_CST))
    return -1;

  /* Strips the constant part.  */
  if (TREE_CODE (ubase) == PLUS_EXPR
      || TREE_CODE (ubase) == MINUS_EXPR
      || TREE_CODE (ubase) == POINTER_PLUS_EXPR)
    {
      if (TREE_CODE (TREE_OPERAND (ubase, 1)) == INTEGER_CST)
        ubase = TREE_OPERAND (ubase, 0);
    }

  /* Strips the constant part.  */
  if (TREE_CODE (cbase) == PLUS_EXPR
      || TREE_CODE (cbase) == MINUS_EXPR
      || TREE_CODE (cbase) == POINTER_PLUS_EXPR)
    {
      if (TREE_CODE (TREE_OPERAND (cbase, 1)) == INTEGER_CST)
        cbase = TREE_OPERAND (cbase, 0);
    }

  if (address_p)
    {
      if (((TREE_CODE (ubase) == SSA_NAME)
           || (TREE_CODE (ubase) == ADDR_EXPR
               && is_gimple_min_invariant (ubase)))
          && (TREE_CODE (cbase) == INTEGER_CST))
        return -1;

      if (((TREE_CODE (cbase) == SSA_NAME)
           || (TREE_CODE (cbase) == ADDR_EXPR
               && is_gimple_min_invariant (cbase)))
          && (TREE_CODE (ubase) == INTEGER_CST))
        return -1;
    }

  if (ratio == 1)
    {
      if (operand_equal_p (ubase, cbase, 0))
        return -1;

      if (TREE_CODE (ubase) == ADDR_EXPR
          && TREE_CODE (cbase) == ADDR_EXPR)
        {
          tree usym, csym;

          usym = TREE_OPERAND (ubase, 0);
          csym = TREE_OPERAND (cbase, 0);
          if (TREE_CODE (usym) == ARRAY_REF)
            {
              tree ind = TREE_OPERAND (usym, 1);
              if (TREE_CODE (ind) == INTEGER_CST
                  && host_integerp (ind, 0)
                  && TREE_INT_CST_LOW (ind) == 0)
                usym = TREE_OPERAND (usym, 0);
            }
          if (TREE_CODE (csym) == ARRAY_REF)
            {
              tree ind = TREE_OPERAND (csym, 1);
              if (TREE_CODE (ind) == INTEGER_CST
                  && host_integerp (ind, 0)
                  && TREE_INT_CST_LOW (ind) == 0)
                csym = TREE_OPERAND (csym, 0);
            }
          if (operand_equal_p (usym, csym, 0))
            return -1;
        }
      /* Now do more complex comparison.  */
      tree_to_aff_combination (ubase, TREE_TYPE (ubase), &ubase_aff);
      tree_to_aff_combination (cbase, TREE_TYPE (cbase), &cbase_aff);
      if (compare_aff_trees (&ubase_aff, &cbase_aff))
        return -1;
    }

  tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff);
  tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff);

  aff_combination_scale (&cbase_aff, shwi_to_double_int (-1 * ratio));
  aff_combination_add (&ubase_aff, &cbase_aff);
  expr = aff_combination_to_tree (&ubase_aff);
  return get_expr_id (data, expr);
}
/* Determines the cost of the computation by that USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into
   register.  A set of invariants we depend on is stored in
   DEPENDS_ON.  AT is the statement at that the value is computed.
   If CAN_AUTOINC is nonnull, use it to record whether autoinc
   addressing is likely.  */

static comp_cost
get_computation_cost_at (struct ivopts_data *data,
                         struct iv_use *use, struct iv_cand *cand,
                         bool address_p, bitmap *depends_on, gimple at,
                         bool *can_autoinc, int *inv_expr_id)
{
  tree ubase = use->iv->base, ustep = use->iv->step;
  tree cbase, cstep;
  tree utype = TREE_TYPE (ubase), ctype;
  unsigned HOST_WIDE_INT cstepi, offset = 0;
  HOST_WIDE_INT ratio, aratio;
  bool var_present, symbol_present, stmt_is_after_inc;
  comp_cost cost;
  double_int rat;
  bool speed = optimize_bb_for_speed_p (gimple_bb (at));

  *depends_on = NULL;

  /* Only consider real candidates.  */
  if (!cand->iv)
    return infinite_cost;

  cbase = cand->iv->base;
  cstep = cand->iv->step;
  ctype = TREE_TYPE (cbase);

  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    {
      /* We do not have a precision to express the values of use.  */
      return infinite_cost;
    }

  if (address_p)
    {
      /* Do not try to express address of an object with computation based
         on address of a different object.  This may cause problems in rtl
         level alias analysis (that does not expect this to be happening,
         as this is illegal in C), and would be unlikely to be useful
         anyway.  */
      if (use->iv->base_object
          && cand->iv->base_object
          && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
        return infinite_cost;
    }

  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      /* TODO -- add direct handling of this case.  */
      goto fallback;
    }

  /* CSTEPI is removed from the offset in case statement is after the
     increment.  If the step is not constant, we use zero instead.
     This is a bit imprecise (there is the extra addition), but
     redundancy elimination is likely to transform the code so that
     it uses value of the variable before increment anyway,
     so it is not that much unrealistic.  */
  if (cst_and_fits_in_hwi (cstep))
    cstepi = int_cst_value (cstep);
  else
    cstepi = 0;

  if (!constant_multiple_of (ustep, cstep, &rat))
    return infinite_cost;

  if (double_int_fits_in_shwi_p (rat))
    ratio = double_int_to_shwi (rat);
  else
    return infinite_cost;

  STRIP_NOPS (cbase);
  ctype = TREE_TYPE (cbase);

  stmt_is_after_inc = stmt_after_increment (data->current_loop, cand, at);

  /* use = ubase + ratio * (var - cbase).  If either cbase is a constant
     or ratio == 1, it is better to handle this like

     ubase - ratio * cbase + ratio * var

     (also holds in the case ratio == -1, TODO).  */

  if (cst_and_fits_in_hwi (cbase))
    {
      offset = - ratio * int_cst_value (cbase);
      cost = difference_cost (data,
                              ubase, build_int_cst (utype, 0),
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else if (ratio == 1)
    {
      tree real_cbase = cbase;

      /* Check to see if any adjustment is needed.  */
      if (cstepi == 0 && stmt_is_after_inc)
        {
          aff_tree real_cbase_aff;
          aff_tree cstep_aff;

          tree_to_aff_combination (cbase, TREE_TYPE (real_cbase),
                                   &real_cbase_aff);
          tree_to_aff_combination (cstep, TREE_TYPE (cstep), &cstep_aff);

          aff_combination_add (&real_cbase_aff, &cstep_aff);
          real_cbase = aff_combination_to_tree (&real_cbase_aff);
        }

      cost = difference_cost (data,
                              ubase, real_cbase,
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else if (address_p
           && !POINTER_TYPE_P (ctype)
           && multiplier_allowed_in_address_p
                (ratio, TYPE_MODE (TREE_TYPE (utype)),
                 TYPE_ADDR_SPACE (TREE_TYPE (utype))))
    {
      cbase
        = fold_build2 (MULT_EXPR, ctype, cbase, build_int_cst (ctype, ratio));
      cost = difference_cost (data,
                              ubase, cbase,
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else
    {
      cost = force_var_cost (data, cbase, depends_on);
      cost = add_costs (cost,
                        difference_cost (data,
                                         ubase, build_int_cst (utype, 0),
                                         &symbol_present, &var_present,
                                         &offset, depends_on));
      cost.cost /= avg_loop_niter (data->current_loop);
      cost.cost += add_cost (TYPE_MODE (ctype), data->speed);
    }

  if (inv_expr_id)
    {
      *inv_expr_id =
          get_loop_invariant_expr_id (data, ubase, cbase, ratio, address_p);
      /* Clear depends on.  */
      if (*inv_expr_id != -1 && depends_on && *depends_on)
        bitmap_clear (*depends_on);
    }

  /* If we are after the increment, the value of the candidate is higher by
     one iteration.  */
  if (stmt_is_after_inc)
    offset -= ratio * cstepi;

  /* Now the computation is in shape symbol + var1 + const + ratio * var2.
     (symbol/var1/const parts may be omitted).  If we are looking for an
     address, find the cost of addressing this.  */
  if (address_p)
    return add_costs (cost,
                      get_address_cost (symbol_present, var_present,
                                        offset, ratio, cstepi,
                                        TYPE_MODE (TREE_TYPE (utype)),
                                        TYPE_ADDR_SPACE (TREE_TYPE (utype)),
                                        speed, stmt_is_after_inc,
                                        can_autoinc));

  /* Otherwise estimate the costs for computing the expression.  */
  if (!symbol_present && !var_present && !offset)
    {
      if (ratio != 1)
        cost.cost += multiply_by_cost (ratio, TYPE_MODE (ctype), speed);
      return cost;
    }

  /* Symbol + offset should be compile-time computable so consider that they
     are added once to the variable, if present.  */
  if (var_present && (symbol_present || offset))
    cost.cost += adjust_setup_cost (data,
                                    add_cost (TYPE_MODE (ctype), speed));

  /* Having offset does not affect runtime cost in case it is added to
     symbol, but it increases complexity.  */
  if (offset)
    cost.complexity++;

  cost.cost += add_cost (TYPE_MODE (ctype), speed);

  aratio = ratio > 0 ? ratio : -ratio;
  if (aratio != 1)
    cost.cost += multiply_by_cost (aratio, TYPE_MODE (ctype), speed);
  return cost;

fallback:
  if (can_autoinc)
    *can_autoinc = false;

  {
    /* Just get the expression, expand it and measure the cost.  */
    tree comp = get_computation_at (data->current_loop, use, cand, at);

    if (!comp)
      return infinite_cost;

    if (address_p)
      comp = build_simple_mem_ref (comp);

    return new_cost (computation_cost (comp, speed), 0);
  }
}
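
/* As an illustration of the constant-cbase shortcut above: expressing a
   use with ubase == p and ustep == 4 from a candidate with cbase == 0
   and cstep == 4 yields ratio == 1 and offset == 0, so the cost reduces
   to the difference_cost of (p - 0), amortized over the expected number
   of iterations, plus the addressing or addition costs added at the end.
   (Illustrative values only.)  */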
/* Determines the cost of the computation by that USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into
   register.  A set of invariants we depend on is stored in
   DEPENDS_ON.  If CAN_AUTOINC is nonnull, use it to record whether
   autoinc addressing is likely.  */

static comp_cost
get_computation_cost (struct ivopts_data *data,
                      struct iv_use *use, struct iv_cand *cand,
                      bool address_p, bitmap *depends_on,
                      bool *can_autoinc, int *inv_expr_id)
{
  return get_computation_cost_at (data,
                                  use, cand, address_p, depends_on, use->stmt,
                                  can_autoinc, inv_expr_id);
}
/* Determines cost of basing replacement of USE on CAND in a generic
   expression.  */

static bool
determine_use_iv_cost_generic (struct ivopts_data *data,
                               struct iv_use *use, struct iv_cand *cand)
{
  bitmap depends_on;
  comp_cost cost;
  int inv_expr_id = -1;

  /* The simple case first -- if we need to express value of the preserved
     original biv, the cost is 0.  This also prevents us from counting the
     cost of increment twice -- once at this use and once in the cost of
     the candidate.  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      set_use_iv_cost (data, use, cand, zero_cost, NULL, NULL_TREE, -1);
      return true;
    }

  cost = get_computation_cost (data, use, cand, false, &depends_on,
                               NULL, &inv_expr_id);

  set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE,
                   inv_expr_id);

  return !infinite_cost_p (cost);
}
/* Determines cost of basing replacement of USE on CAND in an address.  */

static bool
determine_use_iv_cost_address (struct ivopts_data *data,
                               struct iv_use *use, struct iv_cand *cand)
{
  bitmap depends_on;
  bool can_autoinc;
  int inv_expr_id = -1;
  comp_cost cost = get_computation_cost (data, use, cand, true, &depends_on,
                                         &can_autoinc, &inv_expr_id);

  if (cand->ainc_use == use)
    {
      if (can_autoinc)
        cost.cost -= cand->cost_step;
      /* If we generated the candidate solely for exploiting autoincrement
         opportunities, and it turns out it can't be used, set the cost to
         infinity to make sure we ignore it.  */
      else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
        cost = infinite_cost;
    }
  set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE,
                   inv_expr_id);

  return !infinite_cost_p (cost);
}
/* Computes value of candidate CAND at position AT in iteration NITER, and
   stores it to VAL.  */

static void
cand_value_at (struct loop *loop, struct iv_cand *cand, gimple at, tree niter,
               aff_tree *val)
{
  aff_tree step, delta, nit;
  struct iv *iv = cand->iv;
  tree type = TREE_TYPE (iv->base);
  tree steptype = type;
  if (POINTER_TYPE_P (type))
    steptype = sizetype;

  tree_to_aff_combination (iv->step, steptype, &step);
  tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
  aff_combination_convert (&nit, steptype);
  aff_combination_mult (&nit, &step, &delta);
  if (stmt_after_increment (loop, cand, at))
    aff_combination_add (&delta, &step);

  tree_to_aff_combination (iv->base, type, val);
  aff_combination_add (val, &delta);
}
/* Returns period of induction variable iv.  */

static tree
iv_period (struct iv *iv)
{
  tree step = iv->step, period, type;
  tree pow2div;

  gcc_assert (step && TREE_CODE (step) == INTEGER_CST);

  type = unsigned_type_for (TREE_TYPE (step));
  /* Period of the iv is lcm (step, type_range)/step -1,
     i.e., N*type_range/step - 1.  Since type range is power
     of two, N == (step >> num_of_ending_zeros_binary (step)),
     so the final result is

       (type_range >> num_of_ending_zeros_binary (step)) - 1.  */

  pow2div = num_ending_zeros (step);

  period = build_low_bits_mask (type,
                                (TYPE_PRECISION (type)
                                 - tree_low_cst (pow2div, 1)));

  return period;
}
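
/* Worked example: for a 32-bit iv with step 4, num_ending_zeros is 2,
   so the period is (1 << (32 - 2)) - 1 == 0x3fffffff -- the number of
   further increments before the iv revisits a value.  */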
/* Returns the comparison operator used when eliminating the iv USE.  */

static enum tree_code
iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
{
  struct loop *loop = data->current_loop;
  basic_block ex_bb;
  edge exit;

  ex_bb = gimple_bb (use->stmt);
  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);

  return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
}
/* Check whether it is possible to express the condition in USE by comparison
   of candidate CAND.  If so, store the value compared with to BOUND.  */

static bool
may_eliminate_iv (struct ivopts_data *data,
                  struct iv_use *use, struct iv_cand *cand, tree *bound)
{
  basic_block ex_bb;
  edge exit;
  tree nit, period;
  struct loop *loop = data->current_loop;
  aff_tree bnd;
  struct tree_niter_desc *desc = NULL;

  if (TREE_CODE (cand->iv->step) != INTEGER_CST)
    return false;

  /* For now works only for exits that dominate the loop latch.
     TODO: extend to other conditions inside loop body.  */
  ex_bb = gimple_bb (use->stmt);
  if (use->stmt != last_stmt (ex_bb)
      || gimple_code (use->stmt) != GIMPLE_COND
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
    return false;

  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    return false;

  nit = niter_for_exit (data, exit, &desc);
  if (!nit)
    return false;

  /* Determine whether we can use the variable to test the exit condition.
     This is the case iff the period of the induction variable is greater
     than the number of iterations for which the exit condition is true.  */
  period = iv_period (cand->iv);

  /* If the number of iterations is constant, compare against it directly.  */
  if (TREE_CODE (nit) == INTEGER_CST)
    {
      /* See cand_value_at.  */
      if (stmt_after_increment (loop, cand, use->stmt))
        {
          if (!tree_int_cst_lt (nit, period))
            return false;
        }
      else
        {
          if (tree_int_cst_lt (period, nit))
            return false;
        }
    }

  /* If not, and if this is the only possible exit of the loop, see whether
     we can get a conservative estimate on the number of iterations of the
     entire loop and compare against that instead.  */
  else
    {
      double_int period_value, max_niter;

      max_niter = desc->max;
      if (stmt_after_increment (loop, cand, use->stmt))
        max_niter = double_int_add (max_niter, double_int_one);
      period_value = tree_to_double_int (period);
      if (double_int_ucmp (max_niter, period_value) > 0)
        {
          /* See if we can take advantage of inferred loop bound
             information.  */
          if (loop_only_exit_p (loop, exit))
            {
              if (!estimated_loop_iterations (loop, true, &max_niter))
                return false;
              /* The loop bound is already adjusted by adding 1.  */
              if (double_int_ucmp (max_niter, period_value) > 0)
                return false;
            }
          else
            return false;
        }
    }

  cand_value_at (loop, cand, use->stmt, nit, &bnd);

  *bound = aff_combination_to_tree (&bnd);
  /* It is unlikely that computing the number of iterations using division
     would be more profitable than keeping the original induction
     variable.  */
  if (expression_expensive_p (*bound))
    return false;

  return true;
}
/* Calculates the cost of BOUND, if it is a PARM_DECL.  A PARM_DECL must
   be copied, if it is used in the loop body and DATA->body_includes_call.  */

static int
parm_decl_cost (struct ivopts_data *data, tree bound)
{
  tree sbound = bound;
  STRIP_NOPS (sbound);

  if (TREE_CODE (sbound) == SSA_NAME
      && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
      && gimple_nop_p (SSA_NAME_DEF_STMT (sbound))
      && data->body_includes_call)
    return COSTS_N_INSNS (1);

  return 0;
}
/* Determines cost of basing replacement of USE on CAND in a condition.  */

static bool
determine_use_iv_cost_condition (struct ivopts_data *data,
                                 struct iv_use *use, struct iv_cand *cand)
{
  tree bound = NULL_TREE;
  struct iv *cmp_iv;
  bitmap depends_on_elim = NULL, depends_on_express = NULL, depends_on;
  comp_cost elim_cost, express_cost, cost, bound_cost;
  bool ok;
  int elim_inv_expr_id = -1, express_inv_expr_id = -1, inv_expr_id;
  tree *control_var, *bound_cst;

  /* Only consider real candidates.  */
  if (!cand->iv)
    {
      set_use_iv_cost (data, use, cand, infinite_cost, NULL, NULL_TREE, -1);
      return false;
    }

  /* Try iv elimination.  */
  if (may_eliminate_iv (data, use, cand, &bound))
    {
      elim_cost = force_var_cost (data, bound, &depends_on_elim);
      if (elim_cost.cost == 0)
        elim_cost.cost = parm_decl_cost (data, bound);
      else if (TREE_CODE (bound) == INTEGER_CST)
        elim_cost.cost = 0;
      /* If we replace a loop condition 'i < n' with 'p < base + n',
         depends_on_elim will have 'base' and 'n' set, which implies
         that both 'base' and 'n' will be live during the loop.  More likely,
         'base + n' will be loop invariant, resulting in only one live value
         during the loop.  So in that case we clear depends_on_elim and set
         elim_inv_expr_id instead.  */
      if (depends_on_elim && bitmap_count_bits (depends_on_elim) > 1)
        {
          elim_inv_expr_id = get_expr_id (data, bound);
          bitmap_clear (depends_on_elim);
        }
      /* The bound is a loop invariant, so it will be only computed
         once.  */
      elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
    }
  else
    elim_cost = infinite_cost;

  /* Try expressing the original giv.  If it is compared with an invariant,
     note that we cannot get rid of it.  */
  ok = extract_cond_operands (data, use->stmt, &control_var, &bound_cst,
                              NULL, &cmp_iv);
  gcc_assert (ok);

  /* When the condition is a comparison of the candidate IV against
     zero, prefer this IV.

     TODO: The constant that we're subtracting from the cost should
     be target-dependent.  This information should be added to the
     target costs for each backend.  */
  if (!infinite_cost_p (elim_cost) /* Do not try to decrease infinite! */
      && integer_zerop (*bound_cst)
      && (operand_equal_p (*control_var, cand->var_after, 0)
          || operand_equal_p (*control_var, cand->var_before, 0)))
    elim_cost.cost -= 1;

  express_cost = get_computation_cost (data, use, cand, false,
                                       &depends_on_express, NULL,
                                       &express_inv_expr_id);
  fd_ivopts_data = data;
  walk_tree (&cmp_iv->base, find_depends, &depends_on_express, NULL);

  /* Count the cost of the original bound as well.  */
  bound_cost = force_var_cost (data, *bound_cst, NULL);
  if (bound_cost.cost == 0)
    bound_cost.cost = parm_decl_cost (data, *bound_cst);
  else if (TREE_CODE (*bound_cst) == INTEGER_CST)
    bound_cost.cost = 0;
  express_cost.cost += bound_cost.cost;

  /* Choose the better approach, preferring the eliminated IV.  */
  if (compare_costs (elim_cost, express_cost) <= 0)
    {
      cost = elim_cost;
      depends_on = depends_on_elim;
      depends_on_elim = NULL;
      inv_expr_id = elim_inv_expr_id;
    }
  else
    {
      cost = express_cost;
      depends_on = depends_on_express;
      depends_on_express = NULL;
      bound = NULL_TREE;
      inv_expr_id = express_inv_expr_id;
    }

  set_use_iv_cost (data, use, cand, cost, depends_on, bound, inv_expr_id);

  if (depends_on_elim)
    BITMAP_FREE (depends_on_elim);
  if (depends_on_express)
    BITMAP_FREE (depends_on_express);

  return !infinite_cost_p (cost);
}
/* Determines cost of basing replacement of USE on CAND.  Returns false
   if USE cannot be based on CAND.  */

static bool
determine_use_iv_cost (struct ivopts_data *data,
                       struct iv_use *use, struct iv_cand *cand)
{
  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      return determine_use_iv_cost_generic (data, use, cand);

    case USE_ADDRESS:
      return determine_use_iv_cost_address (data, use, cand);

    case USE_COMPARE:
      return determine_use_iv_cost_condition (data, use, cand);

    default:
      gcc_unreachable ();
    }
}
/* Return true if get_computation_cost indicates that autoincrement is
   a possibility for the pair of USE and CAND, false otherwise.  */

static bool
autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
                           struct iv_cand *cand)
{
  bitmap depends_on;
  bool can_autoinc;
  comp_cost cost;

  if (use->type != USE_ADDRESS)
    return false;

  cost = get_computation_cost (data, use, cand, true, &depends_on,
                               &can_autoinc, NULL);

  BITMAP_FREE (depends_on);

  return !infinite_cost_p (cost) && can_autoinc;
}
/* Examine IP_ORIGINAL candidates to see if they are incremented next to a
   use that allows autoincrement, and set their AINC_USE if possible.  */

static void
set_autoinc_for_original_candidates (struct ivopts_data *data)
{
  unsigned i, j;

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);
      struct iv_use *closest = NULL;
      if (cand->pos != IP_ORIGINAL)
        continue;
      for (j = 0; j < n_iv_uses (data); j++)
        {
          struct iv_use *use = iv_use (data, j);
          unsigned uid = gimple_uid (use->stmt);
          if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at)
              || uid > gimple_uid (cand->incremented_at))
            continue;
          if (closest == NULL || uid > gimple_uid (closest->stmt))
            closest = use;
        }
      if (closest == NULL || !autoinc_possible_for_pair (data, closest, cand))
        continue;
      cand->ainc_use = closest;
    }
}
4684 /* Finds the candidates for the induction variables. */
4687 find_iv_candidates (struct ivopts_data
*data
)
4689 /* Add commonly used ivs. */
4690 add_standard_iv_candidates (data
);
4692 /* Add old induction variables. */
4693 add_old_ivs_candidates (data
);
4695 /* Add induction variables derived from uses. */
4696 add_derived_ivs_candidates (data
);
4698 set_autoinc_for_original_candidates (data
);
4700 /* Record the important candidates. */
4701 record_important_candidates (data
);
/* Determines costs of basing the use of the iv on an iv candidate.  */

static void
determine_use_iv_costs (struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_use *use;
  struct iv_cand *cand;
  bitmap to_clear = BITMAP_ALLOC (NULL);

  alloc_use_cost_map (data);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      if (data->consider_all_candidates)
	{
	  for (j = 0; j < n_iv_cands (data); j++)
	    {
	      cand = iv_cand (data, j);
	      determine_use_iv_cost (data, use, cand);
	    }
	}
      else
	{
	  bitmap_iterator bi;

	  EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
	    {
	      cand = iv_cand (data, j);
	      if (!determine_use_iv_cost (data, use, cand))
		bitmap_set_bit (to_clear, j);
	    }

	  /* Remove the candidates for which the cost is infinite from
	     the list of related candidates.  */
	  bitmap_and_compl_into (use->related_cands, to_clear);
	  bitmap_clear (to_clear);
	}
    }

  BITMAP_FREE (to_clear);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Use-candidate costs:\n");

      for (i = 0; i < n_iv_uses (data); i++)
	{
	  use = iv_use (data, i);

	  fprintf (dump_file, "Use %d:\n", i);
	  fprintf (dump_file, "  cand\tcost\tcompl.\tdepends on\n");
	  for (j = 0; j < use->n_map_members; j++)
	    {
	      if (!use->cost_map[j].cand
		  || infinite_cost_p (use->cost_map[j].cost))
		continue;

	      fprintf (dump_file, "  %d\t%d\t%d\t",
		       use->cost_map[j].cand->id,
		       use->cost_map[j].cost.cost,
		       use->cost_map[j].cost.complexity);
	      if (use->cost_map[j].depends_on)
		bitmap_print (dump_file,
			      use->cost_map[j].depends_on, "","");
	      if (use->cost_map[j].inv_expr_id != -1)
		fprintf (dump_file, " inv_expr:%d", use->cost_map[j].inv_expr_id);
	      fprintf (dump_file, "\n");
	    }

	  fprintf (dump_file, "\n");
	}
      fprintf (dump_file, "\n");
    }
}
/* Determines cost of the candidate CAND.  */

static void
determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
{
  comp_cost cost_base;
  unsigned cost, cost_step;
  tree base;

  if (!cand->iv)
    {
      cand->cost = 0;
      return;
    }

  /* There are two costs associated with the candidate -- its increment
     and its initialization.  The second is almost negligible for any loop
     that rolls enough, so we take it just very little into account.  */

  base = cand->iv->base;
  cost_base = force_var_cost (data, base, NULL);
  /* It will be exceptional that the iv register happens to be initialized with
     the proper value at no cost.  In general, there will at least be a regcopy
     or a const set.  */
  if (cost_base.cost == 0)
    cost_base.cost = COSTS_N_INSNS (1);
  cost_step = add_cost (TYPE_MODE (TREE_TYPE (base)), data->speed);

  cost = cost_step + adjust_setup_cost (data, cost_base.cost);

  /* Prefer the original ivs unless we may gain something by replacing it.
     The reason is to make debugging simpler; so this is not relevant for
     artificial ivs created by other optimization passes.  */
  if (cand->pos != IP_ORIGINAL
      || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
    cost++;

  /* Prefer not to insert statements into latch unless there are some
     already (so that we do not create unnecessary jumps).  */
  if (cand->pos == IP_END
      && empty_block_p (ip_end_pos (data->current_loop)))
    cost++;

  cand->cost = cost;
  cand->cost_step = cost_step;
}
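
/* As a rough illustration (the real numbers are target-dependent): with
   a step addition costing 1 and a base that costs 4 to force into a
   register, the candidate cost comes out near 1 + 4 / AVG_LOOP_NITER,
   i.e. the one-time setup is amortized over the expected number of
   iterations by adjust_setup_cost, plus 1 if the candidate is not an
   original iv.  */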
/* Determines costs of computation of the candidates.  */

static void
determine_iv_costs (struct ivopts_data *data)
{
  unsigned i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Candidate costs:\n");
      fprintf (dump_file, "  cand\tcost\n");
    }

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      determine_iv_cost (data, cand);

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  %d\t%d\n", i, cand->cost);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n");
}

/* Calculates cost for having SIZE induction variables.  */

static unsigned
ivopts_global_cost_for_size (struct ivopts_data *data, unsigned size)
{
  /* We add size to the cost, so that we prefer eliminating ivs
     if possible.  */
  return size + estimate_reg_pressure_cost (size, data->regs_used, data->speed,
					    data->body_includes_call);
}
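
/* E.g. once SIZE exceeds the number of available registers,
   estimate_reg_pressure_cost starts accounting for spills, so the
   cost-for-size table dumped by determine_set_costs below typically
   grows slowly up to target_avail_regs and much faster beyond it
   (illustrative; the exact curve is target-dependent).  */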
/* For each size of the induction variable set determine the penalty.  */

static void
determine_set_costs (struct ivopts_data *data)
{
  unsigned j, n;
  gimple phi;
  gimple_stmt_iterator psi;
  tree op;
  struct loop *loop = data->current_loop;
  bitmap_iterator bi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Global costs:\n");
      fprintf (dump_file, "  target_avail_regs %d\n", target_avail_regs);
      fprintf (dump_file, "  target_clobbered_regs %d\n", target_clobbered_regs);
      fprintf (dump_file, "  target_reg_cost %d\n", target_reg_cost[data->speed]);
      fprintf (dump_file, "  target_spill_cost %d\n", target_spill_cost[data->speed]);
    }

  n = 0;
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      op = PHI_RESULT (phi);

      if (!is_gimple_reg (op))
	continue;

      if (get_iv (data, op))
	continue;

      n++;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info = ver_info (data, j);

      if (info->inv_id && info->has_nonlin_use)
	n++;
    }

  data->regs_used = n;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  regs_used %d\n", n);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  cost for size:\n");
      fprintf (dump_file, "  ivs\tcost\n");
      for (j = 0; j <= 2 * target_avail_regs; j++)
	fprintf (dump_file, "  %d\t%d\n", j,
		 ivopts_global_cost_for_size (data, j));
      fprintf (dump_file, "\n");
    }
}

/* Returns true if A is a cheaper cost pair than B.  */

static bool
cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
{
  int cmp;

  if (!a)
    return false;

  if (!b)
    return true;

  cmp = compare_costs (a->cost, b->cost);
  if (cmp < 0)
    return true;

  if (cmp > 0)
    return false;

  /* In case the costs are the same, prefer the cheaper candidate.  */
  if (a->cand->cost < b->cand->cost)
    return true;

  return false;
}

/* Returns the candidate by which USE is expressed in IVS.  */

static struct cost_pair *
iv_ca_cand_for_use (struct iv_ca *ivs, struct iv_use *use)
{
  return ivs->cand_for_use[use->id];
}

/* Computes the cost field of IVS structure.  */

static void
iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
{
  comp_cost cost = ivs->cand_use_cost;

  cost.cost += ivs->cand_cost;

  cost.cost += ivopts_global_cost_for_size (data,
					    ivs->n_regs + ivs->num_used_inv_expr);

  ivs->cost = cost;
}
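
/* Schematically:

     cost (IVS) = cand_use_cost + cand_cost
		  + global cost for (n_regs + num_used_inv_expr)

   e.g. two uses costing 2 each, one candidate costing 1 and a register
   pressure cost of 3 give 2 + 2 + 1 + 3 = 8 (numbers purely
   illustrative).  */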
/* Remove invariants in set INVS from set IVS.  */

static void
iv_ca_set_remove_invariants (struct iv_ca *ivs, bitmap invs)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      ivs->n_invariant_uses[iid]--;
      if (ivs->n_invariant_uses[iid] == 0)
	ivs->n_regs--;
    }
}

/* Set USE not to be expressed by any candidate in IVS.  */

static void
iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
		 struct iv_use *use)
{
  unsigned uid = use->id, cid;
  struct cost_pair *cp;

  cp = ivs->cand_for_use[uid];
  if (!cp)
    return;
  cid = cp->cand->id;

  ivs->bad_uses++;
  ivs->cand_for_use[uid] = NULL;
  ivs->n_cand_uses[cid]--;

  if (ivs->n_cand_uses[cid] == 0)
    {
      bitmap_clear_bit (ivs->cands, cid);
      /* Do not count the pseudocandidates.  */
      if (cp->cand->iv)
	ivs->n_regs--;
      ivs->n_cands--;
      ivs->cand_cost -= cp->cand->cost;

      iv_ca_set_remove_invariants (ivs, cp->cand->depends_on);
    }

  ivs->cand_use_cost = sub_costs (ivs->cand_use_cost, cp->cost);

  iv_ca_set_remove_invariants (ivs, cp->depends_on);

  if (cp->inv_expr_id != -1)
    {
      ivs->used_inv_expr[cp->inv_expr_id]--;
      if (ivs->used_inv_expr[cp->inv_expr_id] == 0)
	ivs->num_used_inv_expr--;
    }
  iv_ca_recount_cost (data, ivs);
}

/* Add invariants in set INVS to set IVS.  */

static void
iv_ca_set_add_invariants (struct iv_ca *ivs, bitmap invs)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      ivs->n_invariant_uses[iid]++;
      if (ivs->n_invariant_uses[iid] == 1)
	ivs->n_regs++;
    }
}

/* Set cost pair for USE in set IVS to CP.  */

static void
iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_use *use, struct cost_pair *cp)
{
  unsigned uid = use->id, cid;

  if (ivs->cand_for_use[uid] == cp)
    return;

  if (ivs->cand_for_use[uid])
    iv_ca_set_no_cp (data, ivs, use);

  if (cp)
    {
      cid = cp->cand->id;

      ivs->bad_uses--;
      ivs->cand_for_use[uid] = cp;
      ivs->n_cand_uses[cid]++;
      if (ivs->n_cand_uses[cid] == 1)
	{
	  bitmap_set_bit (ivs->cands, cid);
	  /* Do not count the pseudocandidates.  */
	  if (cp->cand->iv)
	    ivs->n_regs++;
	  ivs->n_cands++;
	  ivs->cand_cost += cp->cand->cost;

	  iv_ca_set_add_invariants (ivs, cp->cand->depends_on);
	}

      ivs->cand_use_cost = add_costs (ivs->cand_use_cost, cp->cost);
      iv_ca_set_add_invariants (ivs, cp->depends_on);

      if (cp->inv_expr_id != -1)
	{
	  ivs->used_inv_expr[cp->inv_expr_id]++;
	  if (ivs->used_inv_expr[cp->inv_expr_id] == 1)
	    ivs->num_used_inv_expr++;
	}
      iv_ca_recount_cost (data, ivs);
    }
}
/* Extend set IVS by expressing USE by some of the candidates in it
   if possible.  All important candidates will be considered
   if IMPORTANT_CANDIDATES is true.  */

static void
iv_ca_add_use (struct ivopts_data *data, struct iv_ca *ivs,
	       struct iv_use *use, bool important_candidates)
{
  struct cost_pair *best_cp = NULL, *cp;
  bitmap_iterator bi;
  bitmap cands;
  unsigned i;

  gcc_assert (ivs->upto >= use->id);

  if (ivs->upto == use->id)
    {
      ivs->upto++;
      ivs->bad_uses++;
    }

  cands = (important_candidates ? data->important_candidates : ivs->cands);
  EXECUTE_IF_SET_IN_BITMAP (cands, 0, i, bi)
    {
      struct iv_cand *cand = iv_cand (data, i);

      cp = get_use_iv_cost (data, use, cand);

      if (cheaper_cost_pair (cp, best_cp))
	best_cp = cp;
    }

  iv_ca_set_cp (data, ivs, use, best_cp);
}

/* Get cost for assignment IVS.  */

static comp_cost
iv_ca_cost (struct iv_ca *ivs)
{
  /* This was a conditional expression but it triggered a bug in
     Sun C 5.5.  */
  if (ivs->bad_uses)
    return infinite_cost;
  else
    return ivs->cost;
}

/* Returns true if all dependences of CP are among invariants in IVS.  */

static bool
iv_ca_has_deps (struct iv_ca *ivs, struct cost_pair *cp)
{
  unsigned i;
  bitmap_iterator bi;

  if (!cp->depends_on)
    return true;

  EXECUTE_IF_SET_IN_BITMAP (cp->depends_on, 0, i, bi)
    {
      if (ivs->n_invariant_uses[i] == 0)
	return false;
    }

  return true;
}
/* Creates change of expressing USE by NEW_CP instead of OLD_CP and chains
   it before NEXT_CHANGE.  */

static struct iv_ca_delta *
iv_ca_delta_add (struct iv_use *use, struct cost_pair *old_cp,
		 struct cost_pair *new_cp, struct iv_ca_delta *next_change)
{
  struct iv_ca_delta *change = XNEW (struct iv_ca_delta);

  change->use = use;
  change->old_cp = old_cp;
  change->new_cp = new_cp;
  change->next_change = next_change;

  return change;
}

/* Joins two lists of changes L1 and L2.  Destructive -- old lists
   are rewritten.  */

static struct iv_ca_delta *
iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
{
  struct iv_ca_delta *last;

  if (!l2)
    return l1;

  if (!l1)
    return l2;

  for (last = l1; last->next_change; last = last->next_change)
    continue;
  last->next_change = l2;

  return l1;
}

/* Reverse the list of changes DELTA, forming the inverse to it.  */

static struct iv_ca_delta *
iv_ca_delta_reverse (struct iv_ca_delta *delta)
{
  struct iv_ca_delta *act, *next, *prev = NULL;
  struct cost_pair *tmp;

  for (act = delta; act; act = next)
    {
      next = act->next_change;
      act->next_change = prev;
      prev = act;

      tmp = act->old_cp;
      act->old_cp = act->new_cp;
      act->new_cp = tmp;
    }

  return prev;
}

/* Commit changes in DELTA to IVS.  If FORWARD is false, the changes are
   reverted instead.  */

static void
iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
		    struct iv_ca_delta *delta, bool forward)
{
  struct cost_pair *from, *to;
  struct iv_ca_delta *act;

  if (!forward)
    delta = iv_ca_delta_reverse (delta);

  for (act = delta; act; act = act->next_change)
    {
      from = act->old_cp;
      to = act->new_cp;
      gcc_assert (iv_ca_cand_for_use (ivs, act->use) == from);
      iv_ca_set_cp (data, ivs, act->use, to);
    }

  if (!forward)
    iv_ca_delta_reverse (delta);
}
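
/* The delta list thus acts as an undo log; the typical pattern, used by
   iv_ca_extend and iv_ca_narrow below, is

     *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
     iv_ca_delta_commit (data, ivs, *delta, true);   <- apply tentatively
     cost = iv_ca_cost (ivs);                        <- measure
     iv_ca_delta_commit (data, ivs, *delta, false);  <- roll back

   so that a candidate change can be costed without copying the whole
   set.  */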
/* Returns true if CAND is used in IVS.  */

static bool
iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
{
  return ivs->n_cand_uses[cand->id] > 0;
}

/* Returns number of induction variable candidates in the set IVS.  */

static unsigned
iv_ca_n_cands (struct iv_ca *ivs)
{
  return ivs->n_cands;
}

/* Free the list of changes DELTA.  */

static void
iv_ca_delta_free (struct iv_ca_delta **delta)
{
  struct iv_ca_delta *act, *next;

  for (act = *delta; act; act = next)
    {
      next = act->next_change;
      free (act);
    }

  *delta = NULL;
}

/* Allocates new iv candidates assignment.  */

static struct iv_ca *
iv_ca_new (struct ivopts_data *data)
{
  struct iv_ca *nw = XNEW (struct iv_ca);

  nw->upto = 0;
  nw->bad_uses = 0;
  nw->cand_for_use = XCNEWVEC (struct cost_pair *, n_iv_uses (data));
  nw->n_cand_uses = XCNEWVEC (unsigned, n_iv_cands (data));
  nw->cands = BITMAP_ALLOC (NULL);
  nw->n_cands = 0;
  nw->n_regs = 0;
  nw->cand_use_cost = zero_cost;
  nw->cand_cost = 0;
  nw->n_invariant_uses = XCNEWVEC (unsigned, data->max_inv_id + 1);
  nw->cost = zero_cost;
  nw->used_inv_expr = XCNEWVEC (unsigned, data->inv_expr_id + 1);
  nw->num_used_inv_expr = 0;

  return nw;
}

/* Free memory occupied by the set IVS.  */

static void
iv_ca_free (struct iv_ca **ivs)
{
  free ((*ivs)->cand_for_use);
  free ((*ivs)->n_cand_uses);
  BITMAP_FREE ((*ivs)->cands);
  free ((*ivs)->n_invariant_uses);
  free ((*ivs)->used_inv_expr);
  free (*ivs);
  *ivs = NULL;
}
/* Dumps IVS to FILE.  */

static void
iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
{
  const char *pref = "  invariants ";
  unsigned i;
  comp_cost cost = iv_ca_cost (ivs);

  fprintf (file, "  cost: %d (complexity %d)\n", cost.cost, cost.complexity);
  fprintf (file, "  cand_cost: %d\n  cand_use_cost: %d (complexity %d)\n",
	   ivs->cand_cost, ivs->cand_use_cost.cost,
	   ivs->cand_use_cost.complexity);
  bitmap_print (file, ivs->cands, "  candidates: ","\n");

  for (i = 0; i < ivs->upto; i++)
    {
      struct iv_use *use = iv_use (data, i);
      struct cost_pair *cp = iv_ca_cand_for_use (ivs, use);
      if (cp)
	fprintf (file, "   use:%d --> iv_cand:%d, cost=(%d,%d)\n",
		 use->id, cp->cand->id, cp->cost.cost, cp->cost.complexity);
      else
	fprintf (file, "   use:%d --> ??\n", use->id);
    }

  for (i = 1; i <= data->max_inv_id; i++)
    if (ivs->n_invariant_uses[i])
      {
	fprintf (file, "%s%d", pref, i);
	pref = ", ";
      }
  fprintf (file, "\n\n");
}
/* Try changing candidate in IVS to CAND for each use.  Return cost of the
   new set, and store differences in DELTA.  Number of induction variables
   in the new set is stored to N_IVS.  MIN_NCAND is a flag.  When it is true
   the function will try to find a solution with minimal iv candidates.  */

static comp_cost
iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_cand *cand, struct iv_ca_delta **delta,
	      unsigned *n_ivs, bool min_ncand)
{
  unsigned i;
  comp_cost cost;
  struct iv_use *use;
  struct cost_pair *old_cp, *new_cp;

  *delta = NULL;
  for (i = 0; i < ivs->upto; i++)
    {
      use = iv_use (data, i);
      old_cp = iv_ca_cand_for_use (ivs, use);

      if (old_cp
	  && old_cp->cand == cand)
	continue;

      new_cp = get_use_iv_cost (data, use, cand);
      if (!new_cp)
	continue;

      if (!min_ncand && !iv_ca_has_deps (ivs, new_cp))
	continue;

      if (!min_ncand && !cheaper_cost_pair (new_cp, old_cp))
	continue;

      *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  if (n_ivs)
    *n_ivs = iv_ca_n_cands (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}
/* Try narrowing set IVS by removing CAND.  Return the cost of
   the new set and store the differences in DELTA.  */

static comp_cost
iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_cand *cand, struct iv_ca_delta **delta)
{
  unsigned i, ci;
  struct iv_use *use;
  struct cost_pair *old_cp, *new_cp, *cp;
  bitmap_iterator bi;
  struct iv_cand *cnd;
  comp_cost cost;

  *delta = NULL;
  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      old_cp = iv_ca_cand_for_use (ivs, use);
      if (old_cp->cand != cand)
	continue;

      new_cp = NULL;

      if (data->consider_all_candidates)
	{
	  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id)
		continue;

	      cnd = iv_cand (data, ci);

	      cp = get_use_iv_cost (data, use, cnd);
	      if (!cp)
		continue;

	      if (!iv_ca_has_deps (ivs, cp))
		continue;

	      if (!cheaper_cost_pair (cp, new_cp))
		continue;

	      new_cp = cp;
	    }
	}
      else
	{
	  EXECUTE_IF_AND_IN_BITMAP (use->related_cands, ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id)
		continue;

	      cnd = iv_cand (data, ci);

	      cp = get_use_iv_cost (data, use, cnd);
	      if (!cp)
		continue;
	      if (!iv_ca_has_deps (ivs, cp))
		continue;

	      if (!cheaper_cost_pair (cp, new_cp))
		continue;

	      new_cp = cp;
	    }
	}

      if (!new_cp)
	{
	  iv_ca_delta_free (delta);
	  return infinite_cost;
	}

      *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}
/* Try optimizing the set of candidates IVS by removing candidates other
   than EXCEPT_CAND from it.  Return cost of the new set, and store
   differences in DELTA.  */

static comp_cost
iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
	     struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
  bitmap_iterator bi;
  struct iv_ca_delta *act_delta, *best_delta;
  unsigned i;
  comp_cost best_cost, acost;
  struct iv_cand *cand;

  best_delta = NULL;
  best_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      cand = iv_cand (data, i);

      if (cand == except_cand)
	continue;

      acost = iv_ca_narrow (data, ivs, cand, &act_delta);

      if (compare_costs (acost, best_cost) < 0)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      *delta = NULL;
      return best_cost;
    }

  /* Recurse to possibly remove other unnecessary ivs.  */
  iv_ca_delta_commit (data, ivs, best_delta, true);
  best_cost = iv_ca_prune (data, ivs, except_cand, delta);
  iv_ca_delta_commit (data, ivs, best_delta, false);
  *delta = iv_ca_delta_join (best_delta, *delta);
  return best_cost;
}
/* Tries to extend the set IVS in the best possible way in order
   to express the USE.  If ORIGINALP is true, prefer candidates from
   the original set of IVs, otherwise favor important candidates not
   based on any memory object.  */

static bool
try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
		  struct iv_use *use, bool originalp)
{
  comp_cost best_cost, act_cost;
  unsigned i;
  bitmap_iterator bi;
  struct iv_cand *cand;
  struct iv_ca_delta *best_delta = NULL, *act_delta;
  struct cost_pair *cp;

  iv_ca_add_use (data, ivs, use, false);
  best_cost = iv_ca_cost (ivs);

  cp = iv_ca_cand_for_use (ivs, use);
  if (!cp)
    {
      ivs->upto--;
      ivs->bad_uses--;
      iv_ca_add_use (data, ivs, use, true);
      best_cost = iv_ca_cost (ivs);
      cp = iv_ca_cand_for_use (ivs, use);
    }
  if (cp)
    {
      best_delta = iv_ca_delta_add (use, NULL, cp, NULL);
      iv_ca_set_no_cp (data, ivs, use);
    }

  /* If ORIGINALP is true, try to find the original IV for the use.  Otherwise
     first try important candidates not based on any memory object.  Only if
     this fails, try the specific ones.  Rationale -- in loops with many
     variables the best choice often is to use just one generic biv.  If we
     added here many ivs specific to the uses, the optimization algorithm later
     would be likely to get stuck in a local minimum, thus causing us to create
     too many ivs.  The approach from few ivs to more seems more likely to be
     successful -- starting from few ivs, replacing an expensive use by a
     specific iv should always be a win.  */
  EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
    {
      cand = iv_cand (data, i);

      if (originalp && cand->pos != IP_ORIGINAL)
	continue;

      if (!originalp && cand->iv->base_object != NULL_TREE)
	continue;

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      cp = get_use_iv_cost (data, use, cand);
      if (!cp)
	continue;

      iv_ca_set_cp (data, ivs, use, cp);
      act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
			       true);
      iv_ca_set_no_cp (data, ivs, use);
      act_delta = iv_ca_delta_add (use, NULL, cp, act_delta);

      if (compare_costs (act_cost, best_cost) < 0)
	{
	  best_cost = act_cost;

	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (infinite_cost_p (best_cost))
    {
      for (i = 0; i < use->n_map_members; i++)
	{
	  cp = use->cost_map + i;
	  cand = cp->cand;
	  if (!cand)
	    continue;

	  /* Already tried this.  */
	  if (cand->important)
	    {
	      if (originalp && cand->pos == IP_ORIGINAL)
		continue;
	      if (!originalp && cand->iv->base_object == NULL_TREE)
		continue;
	    }

	  if (iv_ca_cand_used_p (ivs, cand))
	    continue;

	  act_delta = NULL;
	  iv_ca_set_cp (data, ivs, use, cp);
	  act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
	  iv_ca_set_no_cp (data, ivs, use);
	  act_delta = iv_ca_delta_add (use, iv_ca_cand_for_use (ivs, use),
				       cp, act_delta);

	  if (compare_costs (act_cost, best_cost) < 0)
	    {
	      best_cost = act_cost;

	      if (best_delta)
		iv_ca_delta_free (&best_delta);
	      best_delta = act_delta;
	    }
	  else
	    iv_ca_delta_free (&act_delta);
	}
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  iv_ca_delta_free (&best_delta);

  return !infinite_cost_p (best_cost);
}
/* Finds an initial assignment of candidates to uses.  */

static struct iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *ivs = iv_ca_new (data);
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    if (!try_add_cand_for (data, ivs, iv_use (data, i), originalp))
      {
	iv_ca_free (&ivs);
	return NULL;
      }

  return ivs;
}

/* Tries to improve set of induction variables IVS.  */

static bool
try_improve_iv_set (struct ivopts_data *data, struct iv_ca *ivs)
{
  unsigned i, n_ivs;
  comp_cost acost, best_cost = iv_ca_cost (ivs);
  struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
  struct iv_cand *cand;

  /* Try extending the set of induction variables by one.  */
  for (i = 0; i < n_iv_cands (data); i++)
    {
      cand = iv_cand (data, i);

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
      if (!act_delta)
	continue;

      /* If we successfully added the candidate and the set is small enough,
	 try optimizing it by removing other candidates.  */
      if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
	{
	  iv_ca_delta_commit (data, ivs, act_delta, true);
	  acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
	  iv_ca_delta_commit (data, ivs, act_delta, false);
	  act_delta = iv_ca_delta_join (act_delta, tmp_delta);
	}

      if (compare_costs (acost, best_cost) < 0)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      /* Try removing the candidates from the set instead.  */
      best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);

      /* Nothing more we can do.  */
      if (!best_delta)
	return false;
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  gcc_assert (compare_costs (best_cost, iv_ca_cost (ivs)) == 0);
  iv_ca_delta_free (&best_delta);
  return true;
}
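
/* Each improvement step is therefore a single greedy move: add one
   candidate (followed by pruning when the set is small) or prune alone.
   E.g. starting from {iv0}, a step may try {iv0, iv3}, discover while
   pruning that iv0 became useless, and accept {iv3} if that is cheaper
   (candidate numbers purely illustrative).  */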
/* Attempts to find the optimal set of induction variables.  We use a simple
   greedy heuristic -- we try to replace at most one candidate in the selected
   solution and remove the unused ivs while this improves the cost.  */

static struct iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *set;

  /* Get the initial solution.  */
  set = get_initial_solution (data, originalp);
  if (!set)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
      return NULL;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Initial set of candidates:\n");
      iv_ca_dump (data, dump_file, set);
    }

  while (try_improve_iv_set (data, set))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Improved to:\n");
	  iv_ca_dump (data, dump_file, set);
	}
    }

  return set;
}
static struct iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
  unsigned i;
  struct iv_ca *set, *origset;
  struct iv_use *use;
  comp_cost cost, origcost;

  /* Determine the cost based on a strategy that starts with original IVs,
     then try again using a strategy that prefers candidates not based
     on any IVs.  */
  origset = find_optimal_iv_set_1 (data, true);
  set = find_optimal_iv_set_1 (data, false);

  if (!origset && !set)
    return NULL;

  origcost = origset ? iv_ca_cost (origset) : infinite_cost;
  cost = set ? iv_ca_cost (set) : infinite_cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Original cost %d (complexity %d)\n\n",
	       origcost.cost, origcost.complexity);
      fprintf (dump_file, "Final cost %d (complexity %d)\n\n",
	       cost.cost, cost.complexity);
    }

  /* Choose the one with the best cost.  */
  if (compare_costs (origcost, cost) <= 0)
    {
      if (set)
	iv_ca_free (&set);
      set = origset;
    }
  else if (origset)
    iv_ca_free (&origset);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);
      use->selected = iv_ca_cand_for_use (set, use)->cand;
    }

  return set;
}
/* Creates a new induction variable corresponding to CAND.  */

static void
create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
{
  gimple_stmt_iterator incr_pos;
  tree base;
  bool after = false;

  if (!cand->iv)
    return;

  switch (cand->pos)
    {
    case IP_NORMAL:
      incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
      break;

    case IP_END:
      incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
      after = true;
      break;

    case IP_AFTER_USE:
      after = true;
      /* fall through */
    case IP_BEFORE_USE:
      incr_pos = gsi_for_stmt (cand->incremented_at);
      break;

    case IP_ORIGINAL:
      /* Mark that the iv is preserved.  */
      name_info (data, cand->var_before)->preserve_biv = true;
      name_info (data, cand->var_after)->preserve_biv = true;

      /* Rewrite the increment so that it uses var_before directly.  */
      find_interesting_uses_op (data, cand->var_after)->selected = cand;
      return;
    }

  gimple_add_tmp_var (cand->var_before);
  add_referenced_var (cand->var_before);

  base = unshare_expr (cand->iv->base);

  create_iv (base, unshare_expr (cand->iv->step),
	     cand->var_before, data->current_loop,
	     &incr_pos, after, &cand->var_before, &cand->var_after);
}

/* Creates new induction variables described in SET.  */

static void
create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
{
  unsigned i;
  struct iv_cand *cand;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
    {
      cand = iv_cand (data, i);
      create_new_iv (data, cand);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSelected IV set: \n");
      EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
	{
	  cand = iv_cand (data, i);
	  dump_cand (dump_file, cand);
	}
      fprintf (dump_file, "\n");
    }
}
/* Rewrites USE (definition of iv used in a nonlinear expression)
   using candidate CAND.  */

static void
rewrite_use_nonlinear_expr (struct ivopts_data *data,
			    struct iv_use *use, struct iv_cand *cand)
{
  tree comp;
  tree op, tgt;
  gimple ass;
  gimple_stmt_iterator bsi;

  /* An important special case -- if we are asked to express value of
     the original iv by itself, just exit; there is no need to
     introduce a new computation (that might also need casting the
     variable to unsigned and back).  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      tree step, ctype, utype;
      enum tree_code incr_code = PLUS_EXPR, old_code;

      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);

      step = cand->iv->step;
      ctype = TREE_TYPE (step);
      utype = TREE_TYPE (cand->var_after);
      if (TREE_CODE (step) == NEGATE_EXPR)
	{
	  incr_code = MINUS_EXPR;
	  step = TREE_OPERAND (step, 0);
	}

      /* Check whether we may leave the computation unchanged.
	 This is the case only if it does not rely on other
	 computations in the loop -- otherwise, the computation
	 we rely upon may be removed in remove_unused_ivs,
	 thus leading to ICE.  */
      old_code = gimple_assign_rhs_code (use->stmt);
      if (old_code == PLUS_EXPR
	  || old_code == MINUS_EXPR
	  || old_code == POINTER_PLUS_EXPR)
	{
	  if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
	    op = gimple_assign_rhs2 (use->stmt);
	  else if (old_code != MINUS_EXPR
		   && gimple_assign_rhs2 (use->stmt) == cand->var_before)
	    op = gimple_assign_rhs1 (use->stmt);
	  else
	    op = NULL_TREE;
	}
      else
	op = NULL_TREE;

      if (op
	  && (TREE_CODE (op) == INTEGER_CST
	      || operand_equal_p (op, step, 0)))
	return;

      /* Otherwise, add the necessary computations to express
	 the iv.  */
      op = fold_convert (ctype, cand->var_before);
      comp = fold_convert (utype,
			   build2 (incr_code, ctype, op,
				   unshare_expr (step)));
    }
  else
    {
      comp = get_computation (data->current_loop, use, cand);
      gcc_assert (comp != NULL_TREE);
    }

  switch (gimple_code (use->stmt))
    {
    case GIMPLE_PHI:
      tgt = PHI_RESULT (use->stmt);

      /* If we should keep the biv, do not replace it.  */
      if (name_info (data, tgt)->preserve_biv)
	return;

      bsi = gsi_after_labels (gimple_bb (use->stmt));
      break;

    case GIMPLE_ASSIGN:
      tgt = gimple_assign_lhs (use->stmt);
      bsi = gsi_for_stmt (use->stmt);
      break;

    default:
      gcc_unreachable ();
    }

  if (!valid_gimple_rhs_p (comp)
      || (gimple_code (use->stmt) != GIMPLE_PHI
	  /* We can't allow re-allocating the stmt as it might be pointed
	     to still.  */
	  && (get_gimple_rhs_num_ops (TREE_CODE (comp))
	      >= gimple_num_ops (gsi_stmt (bsi)))))
    {
      comp = force_gimple_operand_gsi (&bsi, comp, true, NULL_TREE,
				       true, GSI_SAME_STMT);
      if (POINTER_TYPE_P (TREE_TYPE (tgt)))
	{
	  duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
	  /* As this isn't a plain copy we have to reset alignment
	     information.  */
	  if (SSA_NAME_PTR_INFO (comp))
	    {
	      SSA_NAME_PTR_INFO (comp)->align = 1;
	      SSA_NAME_PTR_INFO (comp)->misalign = 0;
	    }
	}
    }

  if (gimple_code (use->stmt) == GIMPLE_PHI)
    {
      ass = gimple_build_assign (tgt, comp);
      gsi_insert_before (&bsi, ass, GSI_SAME_STMT);

      bsi = gsi_for_stmt (use->stmt);
      remove_phi_node (&bsi, false);
    }
  else
    {
      gimple_assign_set_rhs_from_tree (&bsi, comp);
      use->stmt = gsi_stmt (bsi);
    }
}
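
/* For example (illustrative GIMPLE), a nonlinear use

     a_1 = i_2 + 3;

   expressed by an unsigned candidate with the same evolution as i_2 may
   end up as

     a_1 = (int) (ivtmp_5 + 3);

   with the computation produced by get_computation and forced into a
   valid GIMPLE right-hand side as above.  */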
/* Copies the reference information from OLD_REF to NEW_REF.  */

static void
copy_ref_info (tree new_ref, tree old_ref)
{
  tree new_ptr_base = NULL_TREE;

  TREE_SIDE_EFFECTS (new_ref) = TREE_SIDE_EFFECTS (old_ref);
  TREE_THIS_VOLATILE (new_ref) = TREE_THIS_VOLATILE (old_ref);

  new_ptr_base = TREE_OPERAND (new_ref, 0);

  /* We can transfer points-to information from an old pointer
     or decl base to the new one.  */
  if (new_ptr_base
      && TREE_CODE (new_ptr_base) == SSA_NAME
      && !SSA_NAME_PTR_INFO (new_ptr_base))
    {
      tree base = get_base_address (old_ref);
      if (!base)
	;
      else if ((TREE_CODE (base) == MEM_REF
		|| TREE_CODE (base) == TARGET_MEM_REF)
	       && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
	       && SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0)))
	{
	  struct ptr_info_def *new_pi;
	  duplicate_ssa_name_ptr_info
	    (new_ptr_base, SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0)));
	  new_pi = SSA_NAME_PTR_INFO (new_ptr_base);
	  /* We have to be careful about transferring alignment information.  */
	  if (TREE_CODE (old_ref) == MEM_REF
	      && !(TREE_CODE (new_ref) == TARGET_MEM_REF
		   && (TMR_INDEX2 (new_ref)
		       || (TMR_STEP (new_ref)
			   && (TREE_INT_CST_LOW (TMR_STEP (new_ref))
			       < new_pi->align)))))
	    {
	      new_pi->misalign += double_int_sub (mem_ref_offset (old_ref),
						  mem_ref_offset (new_ref)).low;
	      new_pi->misalign &= (new_pi->align - 1);
	    }
	  else
	    {
	      new_pi->align = 1;
	      new_pi->misalign = 0;
	    }
	}
      else if (TREE_CODE (base) == VAR_DECL
	       || TREE_CODE (base) == PARM_DECL
	       || TREE_CODE (base) == RESULT_DECL)
	{
	  struct ptr_info_def *pi = get_ptr_info (new_ptr_base);
	  pt_solution_set_var (&pi->pt, base);
	}
    }
}
/* Performs a peephole optimization to reorder the iv update statement with
   a mem ref to enable instruction combining in later phases.  The mem ref uses
   the iv value before the update, so the reordering transformation requires
   adjustment of the offset.  CAND is the selected IV_CAND.

   Example:

   t = MEM_REF (base, iv1, 8, 16);  // base, index, stride, offset
   iv2 = iv1 + 1;

   if (t < val)      (1)
     goto L;
   goto Head;

   Directly propagating t over to (1) will introduce an overlapping live
   range and thus increase register pressure.  This peephole transforms
   it into:

   iv2 = iv1 + 1;
   t = MEM_REF (base, iv2, 8, 8);
   if (t < val)
     goto L;
   goto Head;
*/

static void
adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
{
  tree var_after;
  gimple iv_update, stmt;
  basic_block bb;
  gimple_stmt_iterator gsi, gsi_iv;

  if (cand->pos != IP_NORMAL)
    return;

  var_after = cand->var_after;
  iv_update = SSA_NAME_DEF_STMT (var_after);

  bb = gimple_bb (iv_update);
  gsi = gsi_last_nondebug_bb (bb);
  stmt = gsi_stmt (gsi);

  /* Only handle conditional statement for now.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return;

  gsi_prev_nondebug (&gsi);
  stmt = gsi_stmt (gsi);
  if (stmt != iv_update)
    return;

  gsi_prev_nondebug (&gsi);
  if (gsi_end_p (gsi))
    return;

  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  if (stmt != use->stmt)
    return;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reordering \n");
      print_gimple_stmt (dump_file, iv_update, 0, 0);
      print_gimple_stmt (dump_file, use->stmt, 0, 0);
      fprintf (dump_file, "\n");
    }

  gsi = gsi_for_stmt (use->stmt);
  gsi_iv = gsi_for_stmt (iv_update);
  gsi_move_before (&gsi_iv, &gsi);

  cand->pos = IP_BEFORE_USE;
  cand->incremented_at = use->stmt;
}
/* Rewrites USE (address that is an iv) using candidate CAND.  */

static void
rewrite_use_address (struct ivopts_data *data,
		     struct iv_use *use, struct iv_cand *cand)
{
  aff_tree aff;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  tree base_hint = NULL_TREE;
  tree ref, iv;
  bool ok;

  adjust_iv_update_pos (cand, use);
  ok = get_computation_aff (data->current_loop, use, cand, use->stmt, &aff);
  gcc_assert (ok);
  unshare_aff_combination (&aff);

  /* To avoid undefined overflow problems, all IV candidates use unsigned
     integer types.  The drawback is that this makes it impossible for
     create_mem_ref to distinguish an IV that is based on a memory object
     from one that represents simply an offset.

     To work around this problem, we pass a hint to create_mem_ref that
     indicates which variable (if any) in aff is an IV based on a memory
     object.  Note that we only consider the candidate.  If this is not
     based on an object, the base of the reference is in some subexpression
     of the use -- but these will use pointer types, so they are recognized
     by the create_mem_ref heuristics anyway.  */
  if (cand->iv->base_object)
    base_hint = var_at_stmt (data->current_loop, cand, use->stmt);

  iv = var_at_stmt (data->current_loop, cand, use->stmt);
  ref = create_mem_ref (&bsi, TREE_TYPE (*use->op_p), &aff,
			reference_alias_ptr_type (*use->op_p),
			iv, base_hint, data->speed);
  copy_ref_info (ref, *use->op_p);
  *use->op_p = ref;
}
/* Rewrites USE (the condition such that one of the arguments is an iv) using
   candidate CAND.  */

static void
rewrite_use_compare (struct ivopts_data *data,
		     struct iv_use *use, struct iv_cand *cand)
{
  tree comp, *var_p, op, bound;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  enum tree_code compare;
  struct cost_pair *cp = get_use_iv_cost (data, use, cand);
  bool ok;

  bound = cp->value;
  if (bound)
    {
      tree var = var_at_stmt (data->current_loop, cand, use->stmt);
      tree var_type = TREE_TYPE (var);
      gimple_seq stmts;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Replacing exit test: ");
	  print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
	}
      compare = iv_elimination_compare (data, use);
      bound = unshare_expr (fold_convert (var_type, bound));
      op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (
		loop_preheader_edge (data->current_loop),
		stmts);

      gimple_cond_set_lhs (use->stmt, var);
      gimple_cond_set_code (use->stmt, compare);
      gimple_cond_set_rhs (use->stmt, op);
      return;
    }

  /* The induction variable elimination failed; just express the original
     giv.  */
  comp = get_computation (data->current_loop, use, cand);
  gcc_assert (comp != NULL_TREE);

  ok = extract_cond_operands (data, use->stmt, &var_p, NULL, NULL, NULL);
  gcc_assert (ok);

  *var_p = force_gimple_operand_gsi (&bsi, comp, true, SSA_NAME_VAR (*var_p),
				     true, GSI_SAME_STMT);
}
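
/* For example (illustrative), when elimination succeeded the exit test

     if (i_2 < 100)

   may become

     if (ivtmp_5 != &a[100])

   with the bound computation inserted on the preheader edge above;
   otherwise the original comparison operand is just recomputed from the
   candidate.  */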
/* Rewrites USE using candidate CAND.  */

static void
rewrite_use (struct ivopts_data *data, struct iv_use *use, struct iv_cand *cand)
{
  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      rewrite_use_nonlinear_expr (data, use, cand);
      break;

    case USE_ADDRESS:
      rewrite_use_address (data, use, cand);
      break;

    case USE_COMPARE:
      rewrite_use_compare (data, use, cand);
      break;

    default:
      gcc_unreachable ();
    }

  update_stmt (use->stmt);
}

/* Rewrite the uses using the selected induction variables.  */

static void
rewrite_uses (struct ivopts_data *data)
{
  unsigned i;
  struct iv_cand *cand;
  struct iv_use *use;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);
      cand = use->selected;
      gcc_assert (cand);

      rewrite_use (data, use, cand);
    }
}
/* Removes the ivs that are not used after rewriting.  */

static void
remove_unused_ivs (struct ivopts_data *data)
{
  unsigned j;
  bitmap_iterator bi;
  bitmap toremove = BITMAP_ALLOC (NULL);

  /* Figure out an order in which to release SSA DEFs so that we don't
     release something that we'd have to propagate into a debug stmt
     afterwards.  */
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info;

      info = ver_info (data, j);
      if (info->iv
	  && !integer_zerop (info->iv->step)
	  && !info->inv_id
	  && !info->iv->have_use_for
	  && !info->preserve_biv)
	bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));
    }

  release_defs_bitset (toremove);

  BITMAP_FREE (toremove);
}

/* Frees memory occupied by struct tree_niter_desc in *VALUE.  Callback
   for pointer_map_traverse.  */

static bool
free_tree_niter_desc (const void *key ATTRIBUTE_UNUSED, void **value,
		      void *data ATTRIBUTE_UNUSED)
{
  struct tree_niter_desc *const niter = (struct tree_niter_desc *) *value;

  free (niter);
  return true;
}
/* Frees data allocated by the optimization of a single loop.  */

static void
free_loop_data (struct ivopts_data *data)
{
  unsigned i, j;
  bitmap_iterator bi;
  tree obj;

  if (data->niters)
    {
      pointer_map_traverse (data->niters, free_tree_niter_desc, NULL);
      pointer_map_destroy (data->niters);
      data->niters = NULL;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      struct version_info *info;

      info = ver_info (data, i);
      free (info->iv);
      info->iv = NULL;
      info->has_nonlin_use = false;
      info->preserve_biv = false;
      info->inv_id = 0;
    }
  bitmap_clear (data->relevant);
  bitmap_clear (data->important_candidates);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);

      free (use->iv);
      BITMAP_FREE (use->related_cands);
      for (j = 0; j < use->n_map_members; j++)
	if (use->cost_map[j].depends_on)
	  BITMAP_FREE (use->cost_map[j].depends_on);
      free (use->cost_map);
      free (use);
    }
  VEC_truncate (iv_use_p, data->iv_uses, 0);

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      free (cand->iv);
      if (cand->depends_on)
	BITMAP_FREE (cand->depends_on);
      free (cand);
    }
  VEC_truncate (iv_cand_p, data->iv_candidates, 0);

  if (data->version_info_size < num_ssa_names)
    {
      data->version_info_size = 2 * num_ssa_names;
      free (data->version_info);
      data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
    }

  data->max_inv_id = 0;

  FOR_EACH_VEC_ELT (tree, decl_rtl_to_reset, i, obj)
    SET_DECL_RTL (obj, NULL_RTX);

  VEC_truncate (tree, decl_rtl_to_reset, 0);

  htab_empty (data->inv_expr_tab);
  data->inv_expr_id = 0;
}

/* Finalizes data structures used by the iv optimization pass.  LOOPS is the
   loop tree.  */

static void
tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
{
  free_loop_data (data);
  free (data->version_info);
  BITMAP_FREE (data->relevant);
  BITMAP_FREE (data->important_candidates);

  VEC_free (tree, heap, decl_rtl_to_reset);
  VEC_free (iv_use_p, heap, data->iv_uses);
  VEC_free (iv_cand_p, heap, data->iv_candidates);
  htab_delete (data->inv_expr_tab);
}
/* Returns true if the loop body BODY includes any function calls.  */

static bool
loop_body_includes_call (basic_block *body, unsigned num_nodes)
{
  gimple_stmt_iterator gsi;
  unsigned i;

  for (i = 0; i < num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple stmt = gsi_stmt (gsi);
	if (is_gimple_call (stmt)
	    && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
	  return true;
      }
  return false;
}
/* Optimizes the LOOP.  Returns true if anything changed.  */

static bool
tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
{
  bool changed = false;
  struct iv_ca *iv_ca;
  edge exit;
  basic_block *body;

  gcc_assert (!data->niters);
  data->current_loop = loop;
  data->speed = optimize_loop_for_speed_p (loop);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Processing loop %d\n", loop->num);

      exit = single_dom_exit (loop);
      if (exit)
	{
	  fprintf (dump_file, "  single exit %d -> %d, exit condition ",
		   exit->src->index, exit->dest->index);
	  print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}

      fprintf (dump_file, "\n");
    }

  body = get_loop_body (loop);
  data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
  renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
  free (body);

  /* For each ssa name determines whether it behaves as an induction variable
     in some loop.  */
  if (!find_induction_variables (data))
    goto finish;

  /* Finds interesting uses (item 1).  */
  find_interesting_uses (data);
  if (n_iv_uses (data) > MAX_CONSIDERED_USES)
    goto finish;

  /* Finds candidates for the induction variables (item 2).  */
  find_iv_candidates (data);

  /* Calculates the costs (item 3, part 1).  */
  determine_iv_costs (data);
  determine_use_iv_costs (data);
  determine_set_costs (data);

  /* Find the optimal set of induction variables (item 3, part 2).  */
  iv_ca = find_optimal_iv_set (data);
  if (!iv_ca)
    goto finish;
  changed = true;

  /* Create the new induction variables (item 4, part 1).  */
  create_new_ivs (data, iv_ca);
  iv_ca_free (&iv_ca);

  /* Rewrite the uses (item 4, part 2).  */
  rewrite_uses (data);

  /* Remove the ivs that are unused after rewriting.  */
  remove_unused_ivs (data);

  /* We have changed the structure of induction variables; it might happen
     that definitions in the scev database refer to some of them that were
     eliminated.  */
  scev_reset ();

finish:
  free_loop_data (data);

  return changed;
}
/* Main entry point.  Optimizes induction variables in loops.  */

void
tree_ssa_iv_optimize (void)
{
  struct loop *loop;
  struct ivopts_data data;
  loop_iterator li;

  tree_ssa_iv_optimize_init (&data);

  /* Optimize the loops starting with the innermost ones.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	flow_loop_dump (loop, dump_file, NULL, 1);

      tree_ssa_iv_optimize_loop (&data, loop);
    }

  tree_ssa_iv_optimize_finalize (&data);
}