/* Induction variable optimizations.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass tries to find the optimal set of induction variables for the loop.
   It optimizes just the basic linear induction variables (although adding
   support for other types should not be too hard).  It includes the
   optimizations commonly known as strength reduction, induction variable
   coalescing and induction variable elimination.  It does it in the
   following steps:

   1) The interesting uses of induction variables are found.  This includes

      -- uses of induction variables in non-linear expressions
      -- addresses of arrays
      -- comparisons of induction variables

   2) Candidates for the induction variables are found.  This includes

      -- old induction variables
      -- the variables defined by expressions derived from the "interesting
	 uses" above

   3) The optimal (w.r. to a cost function) set of variables is chosen.  The
      cost function assigns a cost to sets of induction variables and consists
      of three parts:

      -- The use costs.  Each of the interesting uses chooses the best induction
	 variable in the set and adds its cost to the sum.  The cost reflects
	 the time spent on modifying the induction variables value to be usable
	 for the given purpose (adding base and offset for arrays, etc.).
      -- The variable costs.  Each of the variables has a cost assigned that
	 reflects the costs associated with incrementing the value of the
	 variable.  The original variables are somewhat preferred.
      -- The set cost.  Depending on the size of the set, extra cost may be
	 added to reflect register pressure.

      All the costs are defined in a machine-specific way, using the target
      hooks and machine descriptions to determine them.

   4) The trees are transformed to use the new variables, the dead code is
      removed.

   All of this is done loop by loop.  Doing it globally is theoretically
   possible, it might give a better performance and it might enable us
   to decide costs more precisely, but getting all the interactions right
   would be complicated.  */
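
/* As an illustrative sketch (not part of the original sources): for a loop
   like

     for (i = 0; i < n; i++)
       a[i] = 0;

   the store address &a + 4 * i is an interesting address use, i < n is an
   interesting compare use, and i itself is a biv candidate.  Strength
   reduction may then select a new pointer iv p with base &a[0] and step 4,
   so that the loop is effectively rewritten as

     for (p = &a[0]; p != &a[n]; p++)
       *p = 0;

   with the original counter eliminated when it has no other uses.  */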
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "ggc.h"
#include "insn-config.h"
#include "recog.h"
#include "pointer-set.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "langhooks.h"
#include "tree-affine.h"
#include "target.h"
#include "tree-inline.h"
#include "tree-ssa-propagate.h"
/* FIXME: add_cost and zero_cost defined in expmed.h conflict with local uses.
 */
#include "expmed.h"
#undef add_cost
#undef zero_cost

/* FIXME: Expressions are expanded to RTL in this pass to determine the
   cost of different addressing modes.  This should be moved to a TBD
   interface between the GIMPLE and RTL worlds.  */
#include "expr.h"
/* The infinite cost.  */
#define INFTY 10000000

/* The expected number of loop iterations.  TODO -- use profiling instead of
   this.  */
#define AVG_LOOP_NITER(LOOP) 5
/* Returns the expected number of loop iterations for LOOP.
   The average trip count is computed from profile data if it
   exists.  */

static inline HOST_WIDE_INT
avg_loop_niter (struct loop *loop)
{
  HOST_WIDE_INT niter = max_stmt_executions_int (loop, false);
  if (niter == -1)
    return AVG_LOOP_NITER (loop);

  return niter;
}
/* Representation of the induction variable.  */
struct iv
{
  tree base;		/* Initial value of the iv.  */
  tree base_object;	/* A memory object to that the induction variable points.  */
  tree step;		/* Step of the iv (constant only).  */
  tree ssa_name;	/* The ssa name with the value.  */
  bool biv_p;		/* Is it a biv?  */
  bool have_use_for;	/* Do we already have a use for it?  */
  unsigned use_id;	/* The identifier in the use if it is the case.  */
};
/* Per-ssa version information (induction variable descriptions, etc.).  */
struct version_info
{
  tree name;		/* The ssa name.  */
  struct iv *iv;	/* Induction variable description.  */
  bool has_nonlin_use;	/* For a loop-level invariant, whether it is used in
			   an expression that is not an induction variable.  */
  bool preserve_biv;	/* For the original biv, whether to preserve it.  */
  unsigned inv_id;	/* Id of an invariant.  */
};
/* Types of uses.  */
enum use_type
{
  USE_NONLINEAR_EXPR,	/* Use in a nonlinear expression.  */
  USE_ADDRESS,		/* Use in an address.  */
  USE_COMPARE		/* Use is a compare.  */
};
/* Cost of a computation.  */
typedef struct
{
  int cost;		/* The runtime cost.  */
  unsigned complexity;	/* The estimate of the complexity of the code for
			   the computation (in no concrete units --
			   complexity field should be larger for more
			   complex expressions and addressing modes).  */
} comp_cost;
static const comp_cost zero_cost = {0, 0};
static const comp_cost infinite_cost = {INFTY, INFTY};
/* The candidate - cost pair.  */
struct cost_pair
{
  struct iv_cand *cand;	/* The candidate.  */
  comp_cost cost;	/* The cost.  */
  bitmap depends_on;	/* The list of invariants that have to be
			   preserved.  */
  tree value;		/* For final value elimination, the expression for
			   the final value of the iv.  For iv elimination,
			   the new bound to compare with.  */
  enum tree_code comp;	/* For iv elimination, the comparison.  */
  int inv_expr_id;	/* Loop invariant expression id.  */
};
/* Use.  */
struct iv_use
{
  unsigned id;		/* The id of the use.  */
  enum use_type type;	/* Type of the use.  */
  struct iv *iv;	/* The induction variable it is based on.  */
  gimple stmt;		/* Statement in that it occurs.  */
  tree *op_p;		/* The place where it occurs.  */
  bitmap related_cands;	/* The set of "related" iv candidates, plus the common
			   important ones.  */

  unsigned n_map_members; /* Number of candidates in the cost_map list.  */
  struct cost_pair *cost_map;
			/* The costs wrto the iv candidates.  */

  struct iv_cand *selected;
			/* The selected candidate.  */
};
/* The position where the iv is computed.  */
enum iv_position
{
  IP_NORMAL,		/* At the end, just before the exit condition.  */
  IP_END,		/* At the end of the latch block.  */
  IP_BEFORE_USE,	/* Immediately before a specific use.  */
  IP_AFTER_USE,		/* Immediately after a specific use.  */
  IP_ORIGINAL		/* The original biv.  */
};
/* The induction variable candidate.  */
struct iv_cand
{
  unsigned id;		/* The number of the candidate.  */
  bool important;	/* Whether this is an "important" candidate, i.e. such
			   that it should be considered by all uses.  */
  ENUM_BITFIELD(iv_position) pos : 8;	/* Where it is computed.  */
  gimple incremented_at;/* For original biv, the statement where it is
			   incremented.  */
  tree var_before;	/* The variable used for it before increment.  */
  tree var_after;	/* The variable used for it after increment.  */
  struct iv *iv;	/* The value of the candidate.  NULL for
			   "pseudocandidate" used to indicate the possibility
			   to replace the final value of an iv by direct
			   computation of the value.  */
  unsigned cost;	/* Cost of the candidate.  */
  unsigned cost_step;	/* Cost of the candidate's increment operation.  */
  struct iv_use *ainc_use; /* For IP_{BEFORE,AFTER}_USE candidates, the place
			      where it is incremented.  */
  bitmap depends_on;	/* The list of invariants that are used in step of the
			   biv.  */
};
/* Loop invariant expression hashtable entry.  */
struct iv_inv_expr_ent
{
  tree expr;
  int id;
  hashval_t hash;
};
/* The data used by the induction variable optimizations.  */

typedef struct iv_use *iv_use_p;
DEF_VEC_P(iv_use_p);
DEF_VEC_ALLOC_P(iv_use_p,heap);

typedef struct iv_cand *iv_cand_p;
DEF_VEC_P(iv_cand_p);
DEF_VEC_ALLOC_P(iv_cand_p,heap);
struct ivopts_data
{
  /* The currently optimized loop.  */
  struct loop *current_loop;

  /* Numbers of iterations for all exits of the current loop.  */
  struct pointer_map_t *niters;

  /* Number of registers used in it.  */
  unsigned regs_used;

  /* The size of version_info array allocated.  */
  unsigned version_info_size;

  /* The array of information for the ssa names.  */
  struct version_info *version_info;

  /* The hashtable of loop invariant expressions created
     by ivopt.  */
  htab_t inv_expr_tab;

  /* Loop invariant expression id.  */
  int inv_expr_id;

  /* The bitmap of indices in version_info whose value was changed.  */
  bitmap relevant;

  /* The uses of induction variables.  */
  VEC(iv_use_p,heap) *iv_uses;

  /* The candidates.  */
  VEC(iv_cand_p,heap) *iv_candidates;

  /* A bitmap of important candidates.  */
  bitmap important_candidates;

  /* The maximum invariant id.  */
  unsigned max_inv_id;

  /* Whether to consider just related and important candidates when replacing a
     use.  */
  bool consider_all_candidates;

  /* Are we optimizing for speed?  */
  bool speed;

  /* Whether the loop body includes any function calls.  */
  bool body_includes_call;

  /* Whether the loop body can only be exited via single exit.  */
  bool loop_single_exit_p;
};
/* An assignment of iv candidates to uses.  */

struct iv_ca
{
  /* The number of uses covered by the assignment.  */
  unsigned upto;

  /* Number of uses that cannot be expressed by the candidates in the set.  */
  unsigned bad_uses;

  /* Candidate assigned to a use, together with the related costs.  */
  struct cost_pair **cand_for_use;

  /* Number of times each candidate is used.  */
  unsigned *n_cand_uses;

  /* The candidates used.  */
  bitmap cands;

  /* The number of candidates in the set.  */
  unsigned n_cands;

  /* Total number of registers needed.  */
  unsigned n_regs;

  /* Total cost of expressing uses.  */
  comp_cost cand_use_cost;

  /* Total cost of candidates.  */
  unsigned cand_cost;

  /* Number of times each invariant is used.  */
  unsigned *n_invariant_uses;

  /* The array holding the number of uses of each loop
     invariant expressions created by ivopt.  */
  unsigned *used_inv_expr;

  /* The number of created loop invariants.  */
  unsigned num_used_inv_expr;

  /* Total cost of the assignment.  */
  comp_cost cost;
};
/* Difference of two iv candidate assignments.  */

struct iv_ca_delta
{
  /* Changed use.  */
  struct iv_use *use;

  /* An old assignment (for rollback purposes).  */
  struct cost_pair *old_cp;

  /* A new assignment.  */
  struct cost_pair *new_cp;

  /* Next change in the list.  */
  struct iv_ca_delta *next_change;
};
/* Bound on number of candidates below that all candidates are considered.  */

#define CONSIDER_ALL_CANDIDATES_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))

/* If there are more iv occurrences, we just give up (it is quite unlikely that
   optimizing such a loop would help, and it would take ages).  */

#define MAX_CONSIDERED_USES \
  ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))

/* If there are at most this number of ivs in the set, try removing unnecessary
   ivs from the set always.  */

#define ALWAYS_PRUNE_CAND_SET_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
/* The list of trees for that the decl_rtl field must be reset is stored
   here.  */

static VEC(tree,heap) *decl_rtl_to_reset;

static comp_cost force_expr_to_var_cost (tree, bool);
/* Number of uses recorded in DATA.  */

static inline unsigned
n_iv_uses (struct ivopts_data *data)
{
  return VEC_length (iv_use_p, data->iv_uses);
}
/* Ith use recorded in DATA.  */

static inline struct iv_use *
iv_use (struct ivopts_data *data, unsigned i)
{
  return VEC_index (iv_use_p, data->iv_uses, i);
}
/* Number of candidates recorded in DATA.  */

static inline unsigned
n_iv_cands (struct ivopts_data *data)
{
  return VEC_length (iv_cand_p, data->iv_candidates);
}
/* Ith candidate recorded in DATA.  */

static inline struct iv_cand *
iv_cand (struct ivopts_data *data, unsigned i)
{
  return VEC_index (iv_cand_p, data->iv_candidates, i);
}
/* The single loop exit if it dominates the latch, NULL otherwise.  */

edge
single_dom_exit (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return NULL;

  if (!just_once_each_iteration_p (loop, exit->src))
    return NULL;

  return exit;
}
/* Dumps information about the induction variable IV to FILE.  */

extern void dump_iv (FILE *, struct iv *);
void
dump_iv (FILE *file, struct iv *iv)
{
  if (iv->ssa_name)
    {
      fprintf (file, "ssa name ");
      print_generic_expr (file, iv->ssa_name, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "  type ");
  print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
  fprintf (file, "\n");

  if (iv->step)
    {
      fprintf (file, "  base ");
      print_generic_expr (file, iv->base, TDF_SLIM);
      fprintf (file, "\n");

      fprintf (file, "  step ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "\n");
    }
  else
    {
      fprintf (file, "  invariant ");
      print_generic_expr (file, iv->base, TDF_SLIM);
      fprintf (file, "\n");
    }

  if (iv->base_object)
    {
      fprintf (file, "  base object ");
      print_generic_expr (file, iv->base_object, TDF_SLIM);
      fprintf (file, "\n");
    }

  if (iv->biv_p)
    fprintf (file, "  is a biv\n");
}
/* Dumps information about the USE to FILE.  */

extern void dump_use (FILE *, struct iv_use *);
void
dump_use (FILE *file, struct iv_use *use)
{
  fprintf (file, "use %d\n", use->id);

  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      fprintf (file, "  generic\n");
      break;

    case USE_ADDRESS:
      fprintf (file, "  address\n");
      break;

    case USE_COMPARE:
      fprintf (file, "  compare\n");
      break;

    default:
      gcc_unreachable ();
    }

  fprintf (file, "  in statement ");
  print_gimple_stmt (file, use->stmt, 0, 0);
  fprintf (file, "\n");

  fprintf (file, "  at position ");
  if (use->op_p)
    print_generic_expr (file, *use->op_p, TDF_SLIM);
  fprintf (file, "\n");

  dump_iv (file, use->iv);

  if (use->related_cands)
    {
      fprintf (file, "  related candidates ");
      dump_bitmap (file, use->related_cands);
    }
}
/* Dumps information about the uses to FILE.  */

extern void dump_uses (FILE *, struct ivopts_data *);
void
dump_uses (FILE *file, struct ivopts_data *data)
{
  unsigned i;
  struct iv_use *use;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      dump_use (file, use);
      fprintf (file, "\n");
    }
}
/* Dumps information about induction variable candidate CAND to FILE.  */

extern void dump_cand (FILE *, struct iv_cand *);
void
dump_cand (FILE *file, struct iv_cand *cand)
{
  struct iv *iv = cand->iv;

  fprintf (file, "candidate %d%s\n",
	   cand->id, cand->important ? " (important)" : "");

  if (cand->depends_on)
    {
      fprintf (file, "  depends on ");
      dump_bitmap (file, cand->depends_on);
    }

  if (!iv)
    {
      fprintf (file, "  final value replacement\n");
      return;
    }

  if (cand->var_before)
    {
      fprintf (file, "  var_before ");
      print_generic_expr (file, cand->var_before, TDF_SLIM);
      fprintf (file, "\n");
    }
  if (cand->var_after)
    {
      fprintf (file, "  var_after ");
      print_generic_expr (file, cand->var_after, TDF_SLIM);
      fprintf (file, "\n");
    }

  switch (cand->pos)
    {
    case IP_NORMAL:
      fprintf (file, "  incremented before exit test\n");
      break;

    case IP_BEFORE_USE:
      fprintf (file, "  incremented before use %d\n", cand->ainc_use->id);
      break;

    case IP_AFTER_USE:
      fprintf (file, "  incremented after use %d\n", cand->ainc_use->id);
      break;

    case IP_END:
      fprintf (file, "  incremented at end\n");
      break;

    case IP_ORIGINAL:
      fprintf (file, "  original biv\n");
      break;
    }

  dump_iv (file, iv);
}
/* Returns the info for ssa version VER.  */

static inline struct version_info *
ver_info (struct ivopts_data *data, unsigned ver)
{
  return data->version_info + ver;
}
/* Returns the info for ssa name NAME.  */

static inline struct version_info *
name_info (struct ivopts_data *data, tree name)
{
  return ver_info (data, SSA_NAME_VERSION (name));
}
/* Returns true if STMT is after the place where the IP_NORMAL ivs will be
   emitted in LOOP.  */

static bool
stmt_after_ip_normal_pos (struct loop *loop, gimple stmt)
{
  basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);

  gcc_assert (bb);

  if (sbb == loop->latch)
    return true;

  if (sbb != bb)
    return false;

  return stmt == last_stmt (bb);
}
/* Returns true if STMT if after the place where the original induction
   variable CAND is incremented.  If TRUE_IF_EQUAL is set, we return true
   if the positions are identical.  */

static bool
stmt_after_inc_pos (struct iv_cand *cand, gimple stmt, bool true_if_equal)
{
  basic_block cand_bb = gimple_bb (cand->incremented_at);
  basic_block stmt_bb = gimple_bb (stmt);

  if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
    return false;

  if (stmt_bb != cand_bb)
    return true;

  if (true_if_equal
      && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
    return true;
  return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
}
/* Returns true if STMT if after the place where the induction variable
   CAND is incremented in LOOP.  */

static bool
stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple stmt)
{
  switch (cand->pos)
    {
    case IP_END:
      return false;

    case IP_NORMAL:
      return stmt_after_ip_normal_pos (loop, stmt);

    case IP_ORIGINAL:
    case IP_AFTER_USE:
      return stmt_after_inc_pos (cand, stmt, false);

    case IP_BEFORE_USE:
      return stmt_after_inc_pos (cand, stmt, true);

    default:
      gcc_unreachable ();
    }
}
/* Returns true if EXP is a ssa name that occurs in an abnormal phi node.  */

static bool
abnormal_ssa_name_p (tree exp)
{
  if (!exp)
    return false;

  if (TREE_CODE (exp) != SSA_NAME)
    return false;

  return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) != 0;
}
/* Returns false if BASE or INDEX contains a ssa name that occurs in an
   abnormal phi node.  Callback for for_each_index.  */

static bool
idx_contains_abnormal_ssa_name_p (tree base, tree *index,
				  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 2)))
	return false;
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 3)))
	return false;
    }

  return !abnormal_ssa_name_p (*index);
}
/* Returns true if EXPR contains a ssa name that occurs in an
   abnormal phi node.  */

bool
contains_abnormal_ssa_name_p (tree expr)
{
  enum tree_code code;
  enum tree_code_class codeclass;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  codeclass = TREE_CODE_CLASS (code);

  if (code == SSA_NAME)
    return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr) != 0;

  if (code == INTEGER_CST
      || is_gimple_min_invariant (expr))
    return false;

  if (code == ADDR_EXPR)
    return !for_each_index (&TREE_OPERAND (expr, 0),
			    idx_contains_abnormal_ssa_name_p,
			    NULL);

  if (code == COND_EXPR)
    return contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0))
	   || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1))
	   || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 2));

  switch (codeclass)
    {
    case tcc_binary:
    case tcc_comparison:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1)))
	return true;

      /* Fallthru.  */
    case tcc_unary:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0)))
	return true;

      break;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Returns the structure describing number of iterations determined from
   EXIT of DATA->current_loop, or NULL if something goes wrong.  */

static struct tree_niter_desc *
niter_for_exit (struct ivopts_data *data, edge exit)
{
  struct tree_niter_desc *desc;
  void **slot;

  if (!data->niters)
    {
      data->niters = pointer_map_create ();
      slot = NULL;
    }
  else
    slot = pointer_map_contains (data->niters, exit);

  if (!slot)
    {
      /* Try to determine number of iterations.  We cannot safely work with ssa
	 names that appear in phi nodes on abnormal edges, so that we do not
	 create overlapping life ranges for them (PR 27283).  */
      desc = XNEW (struct tree_niter_desc);
      if (!number_of_iterations_exit (data->current_loop,
				      exit, desc, true)
	  || contains_abnormal_ssa_name_p (desc->niter))
	{
	  XDELETE (desc);
	  desc = NULL;
	}
      slot = pointer_map_insert (data->niters, exit);
      *slot = desc;
    }
  else
    desc = (struct tree_niter_desc *) *slot;

  return desc;
}
/* Returns the structure describing number of iterations determined from
   single dominating exit of DATA->current_loop, or NULL if something
   goes wrong.  */

static struct tree_niter_desc *
niter_for_single_dom_exit (struct ivopts_data *data)
{
  edge exit = single_dom_exit (data->current_loop);

  if (!exit)
    return NULL;

  return niter_for_exit (data, exit);
}
/* Hash table equality function for expressions.  */

static int
htab_inv_expr_eq (const void *ent1, const void *ent2)
{
  const struct iv_inv_expr_ent *expr1 =
      (const struct iv_inv_expr_ent *)ent1;
  const struct iv_inv_expr_ent *expr2 =
      (const struct iv_inv_expr_ent *)ent2;

  return expr1->hash == expr2->hash
	 && operand_equal_p (expr1->expr, expr2->expr, 0);
}
/* Hash function for loop invariant expressions.  */

static hashval_t
htab_inv_expr_hash (const void *ent)
{
  const struct iv_inv_expr_ent *expr =
      (const struct iv_inv_expr_ent *)ent;
  return expr->hash;
}
/* Initializes data structures used by the iv optimization pass, stored
   in DATA.  */

static void
tree_ssa_iv_optimize_init (struct ivopts_data *data)
{
  data->version_info_size = 2 * num_ssa_names;
  data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
  data->relevant = BITMAP_ALLOC (NULL);
  data->important_candidates = BITMAP_ALLOC (NULL);
  data->max_inv_id = 0;
  data->niters = NULL;
  data->iv_uses = VEC_alloc (iv_use_p, heap, 20);
  data->iv_candidates = VEC_alloc (iv_cand_p, heap, 20);
  data->inv_expr_tab = htab_create (10, htab_inv_expr_hash,
				    htab_inv_expr_eq, free);
  data->inv_expr_id = 0;
  decl_rtl_to_reset = VEC_alloc (tree, heap, 20);
}
/* Returns a memory object to that EXPR points.  In case we are able to
   determine that it does not point to any such object, NULL is returned.  */

static tree
determine_base_object (tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  tree base, obj;

  /* If this is a pointer casted to any type, we need to determine
     the base object for the pointer; so handle conversions before
     throwing away non-pointer expressions.  */
  if (CONVERT_EXPR_P (expr))
    return determine_base_object (TREE_OPERAND (expr, 0));

  if (!POINTER_TYPE_P (TREE_TYPE (expr)))
    return NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      return NULL_TREE;

    case ADDR_EXPR:
      obj = TREE_OPERAND (expr, 0);
      base = get_base_address (obj);

      if (!base)
	return expr;

      if (TREE_CODE (base) == MEM_REF)
	return determine_base_object (TREE_OPERAND (base, 0));

      return fold_convert (ptr_type_node,
			   build_fold_addr_expr (base));

    case POINTER_PLUS_EXPR:
      return determine_base_object (TREE_OPERAND (expr, 0));

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Pointer addition is done solely using POINTER_PLUS_EXPR.  */
      gcc_unreachable ();

    default:
      return fold_convert (ptr_type_node, expr);
    }
}
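
/* For example (illustrative only): for EXPR of the form &a[i] this returns
   the address of the array a itself; for an integer constant cast to a
   pointer, NULL_TREE is returned, since no underlying memory object can be
   determined.  */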
/* Allocates an induction variable with given initial value BASE and step STEP
   for loop LOOP.  */

static struct iv *
alloc_iv (tree base, tree step)
{
  struct iv *iv = XCNEW (struct iv);
  gcc_assert (step != NULL_TREE);

  iv->base = base;
  iv->base_object = determine_base_object (base);
  iv->step = step;
  iv->biv_p = false;
  iv->have_use_for = false;
  iv->use_id = 0;
  iv->ssa_name = NULL_TREE;

  return iv;
}
/* Sets STEP and BASE for induction variable IV.  */

static void
set_iv (struct ivopts_data *data, tree iv, tree base, tree step)
{
  struct version_info *info = name_info (data, iv);

  gcc_assert (!info->iv);

  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
  info->iv = alloc_iv (base, step);
  info->iv->ssa_name = iv;
}
/* Finds induction variable declaration for VAR.  */

static struct iv *
get_iv (struct ivopts_data *data, tree var)
{
  basic_block bb;
  tree type = TREE_TYPE (var);

  if (!POINTER_TYPE_P (type)
      && !INTEGRAL_TYPE_P (type))
    return NULL;

  if (!name_info (data, var)->iv)
    {
      bb = gimple_bb (SSA_NAME_DEF_STMT (var));

      if (!bb
	  || !flow_bb_inside_loop_p (data->current_loop, bb))
	set_iv (data, var, var, build_int_cst (type, 0));
    }

  return name_info (data, var)->iv;
}
/* Determines the step of a biv defined in PHI.  Returns NULL if PHI does
   not define a simple affine biv with nonzero step.  */

static tree
determine_biv_step (gimple phi)
{
  struct loop *loop = gimple_bb (phi)->loop_father;
  tree name = PHI_RESULT (phi);
  affine_iv iv;

  if (!is_gimple_reg (name))
    return NULL_TREE;

  if (!simple_iv (loop, loop, name, &iv, true))
    return NULL_TREE;

  return integer_zerop (iv.step) ? NULL_TREE : iv.step;
}
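
/* For instance (illustrative only): for the header phi
   i_1 = PHI <0 (preheader), i_2 (latch)> with i_2 = i_1 + 4 in the latch,
   the returned step is 4; for a phi whose value does not change in the
   loop, NULL_TREE is returned.  */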
/* Finds basic ivs.  */

static bool
find_bivs (struct ivopts_data *data)
{
  gimple phi;
  tree step, type, base;
  bool found = false;
  struct loop *loop = data->current_loop;
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);

      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
	continue;

      step = determine_biv_step (phi);
      if (!step)
	continue;

      base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
      base = expand_simple_operations (base);
      if (contains_abnormal_ssa_name_p (base)
	  || contains_abnormal_ssa_name_p (step))
	continue;

      type = TREE_TYPE (PHI_RESULT (phi));
      base = fold_convert (type, base);
      if (step)
	{
	  if (POINTER_TYPE_P (type))
	    step = convert_to_ptrofftype (step);
	  else
	    step = fold_convert (type, step);
	}

      set_iv (data, PHI_RESULT (phi), base, step);
      found = true;
    }

  return found;
}
/* Marks basic ivs.  */

static void
mark_bivs (struct ivopts_data *data)
{
  gimple phi;
  tree var;
  struct iv *iv, *incr_iv;
  struct loop *loop = data->current_loop;
  basic_block incr_bb;
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);

      iv = get_iv (data, PHI_RESULT (phi));
      if (!iv)
	continue;

      var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
      incr_iv = get_iv (data, var);
      if (!incr_iv)
	continue;

      /* If the increment is in the subloop, ignore it.  */
      incr_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
      if (incr_bb->loop_father != data->current_loop
	  || (incr_bb->flags & BB_IRREDUCIBLE_LOOP))
	continue;

      iv->biv_p = true;
      incr_iv->biv_p = true;
    }
}
/* Checks whether STMT defines a linear induction variable and stores its
   parameters to IV.  */

static bool
find_givs_in_stmt_scev (struct ivopts_data *data, gimple stmt, affine_iv *iv)
{
  tree lhs;
  struct loop *loop = data->current_loop;

  iv->base = NULL_TREE;
  iv->step = NULL_TREE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
    return false;
  iv->base = expand_simple_operations (iv->base);

  if (contains_abnormal_ssa_name_p (iv->base)
      || contains_abnormal_ssa_name_p (iv->step))
    return false;

  /* If STMT could throw, then do not consider STMT as defining a GIV.
     While this will suppress optimizations, we can not safely delete this
     GIV and associated statements, even if it appears it is not used.  */
  if (stmt_could_throw_p (stmt))
    return false;

  return true;
}
/* Finds general ivs in statement STMT.  */

static void
find_givs_in_stmt (struct ivopts_data *data, gimple stmt)
{
  affine_iv iv;

  if (!find_givs_in_stmt_scev (data, stmt, &iv))
    return;

  set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step);
}
/* Finds general ivs in basic block BB.  */

static void
find_givs_in_bb (struct ivopts_data *data, basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    find_givs_in_stmt (data, gsi_stmt (bsi));
}
/* Finds general ivs.  */

static void
find_givs (struct ivopts_data *data)
{
  struct loop *loop = data->current_loop;
  basic_block *body = get_loop_body_in_dom_order (loop);
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    find_givs_in_bb (data, body[i]);
  free (body);
}
/* For each ssa name defined in LOOP determines whether it is an induction
   variable and if so, its initial value and step.  */

static bool
find_induction_variables (struct ivopts_data *data)
{
  unsigned i;
  bitmap_iterator bi;

  if (!find_bivs (data))
    return false;

  find_givs (data);
  mark_bivs (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct tree_niter_desc *niter = niter_for_single_dom_exit (data);

      if (niter)
	{
	  fprintf (dump_file, "  number of iterations ");
	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
	  if (!integer_zerop (niter->may_be_zero))
	    {
	      fprintf (dump_file, "; zero if ");
	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
	    }
	  fprintf (dump_file, "\n\n");
	}

      fprintf (dump_file, "Induction variables:\n\n");

      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
	{
	  if (ver_info (data, i)->iv)
	    dump_iv (dump_file, ver_info (data, i)->iv);
	}
    }

  return true;
}
/* Records a use of type USE_TYPE at *USE_P in STMT whose value is IV.  */

static struct iv_use *
record_use (struct ivopts_data *data, tree *use_p, struct iv *iv,
	    gimple stmt, enum use_type use_type)
{
  struct iv_use *use = XCNEW (struct iv_use);

  use->id = n_iv_uses (data);
  use->type = use_type;
  use->iv = iv;
  use->stmt = stmt;
  use->op_p = use_p;
  use->related_cands = BITMAP_ALLOC (NULL);

  /* To avoid showing ssa name in the dumps, if it was not reset by the
     caller.  */
  iv->ssa_name = NULL_TREE;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_use (dump_file, use);

  VEC_safe_push (iv_use_p, heap, data->iv_uses, use);

  return use;
}
/* Checks whether OP is a loop-level invariant and if so, records it.
   NONLINEAR_USE is true if the invariant is used in a way we do not
   handle specially.  */

static void
record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
{
  basic_block bb;
  struct version_info *info;

  if (TREE_CODE (op) != SSA_NAME
      || !is_gimple_reg (op))
    return;

  bb = gimple_bb (SSA_NAME_DEF_STMT (op));
  if (bb
      && flow_bb_inside_loop_p (data->current_loop, bb))
    return;

  info = name_info (data, op);
  info->name = op;
  info->has_nonlin_use |= nonlinear_use;
  if (!info->inv_id)
    info->inv_id = ++data->max_inv_id;
  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
}
/* Checks whether the use OP is interesting and if so, records it.  */

static struct iv_use *
find_interesting_uses_op (struct ivopts_data *data, tree op)
{
  struct iv *iv;
  struct iv *civ;
  gimple stmt;
  struct iv_use *use;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  iv = get_iv (data, op);
  if (!iv)
    return NULL;

  if (iv->have_use_for)
    {
      use = iv_use (data, iv->use_id);

      gcc_assert (use->type == USE_NONLINEAR_EXPR);
      return use;
    }

  if (integer_zerop (iv->step))
    {
      record_invariant (data, op, true);
      return NULL;
    }
  iv->have_use_for = true;

  civ = XNEW (struct iv);
  *civ = *iv;

  stmt = SSA_NAME_DEF_STMT (op);
  gcc_assert (gimple_code (stmt) == GIMPLE_PHI
	      || is_gimple_assign (stmt));

  use = record_use (data, NULL, civ, stmt, USE_NONLINEAR_EXPR);
  iv->use_id = use->id;

  return use;
}
/* Given a condition in statement STMT, checks whether it is a compare
   of an induction variable and an invariant.  If this is the case,
   CONTROL_VAR is set to location of the iv, BOUND to the location of
   the invariant, IV_VAR and IV_BOUND are set to the corresponding
   induction variable descriptions, and true is returned.  If this is not
   the case, CONTROL_VAR and BOUND are set to the arguments of the
   condition and false is returned.  */

static bool
extract_cond_operands (struct ivopts_data *data, gimple stmt,
		       tree **control_var, tree **bound,
		       struct iv **iv_var, struct iv **iv_bound)
{
  /* The objects returned when COND has constant operands.  */
  static struct iv const_iv;
  static tree zero;
  tree *op0 = &zero, *op1 = &zero, *tmp_op;
  struct iv *iv0 = &const_iv, *iv1 = &const_iv, *tmp_iv;
  bool ret = false;

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      op0 = gimple_cond_lhs_ptr (stmt);
      op1 = gimple_cond_rhs_ptr (stmt);
    }
  else
    {
      op0 = gimple_assign_rhs1_ptr (stmt);
      op1 = gimple_assign_rhs2_ptr (stmt);
    }

  zero = integer_zero_node;
  const_iv.step = integer_zero_node;

  if (TREE_CODE (*op0) == SSA_NAME)
    iv0 = get_iv (data, *op0);
  if (TREE_CODE (*op1) == SSA_NAME)
    iv1 = get_iv (data, *op1);

  /* Exactly one of the compared values must be an iv, and the other one must
     be an invariant.  */
  if (!iv0 || !iv1)
    goto end;

  if (integer_zerop (iv0->step))
    {
      /* Control variable may be on the other side.  */
      tmp_op = op0; op0 = op1; op1 = tmp_op;
      tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv;
    }
  ret = !integer_zerop (iv0->step) && integer_zerop (iv1->step);

end:
  if (control_var)
    *control_var = op0;
  if (iv_var)
    *iv_var = iv0;
  if (bound)
    *bound = op1;
  if (iv_bound)
    *iv_bound = iv1;

  return ret;
}
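
/* Example (illustrative only): for the exit test i_1 < n_2 where i_1 is an
   iv with step 1 and n_2 is defined outside of the loop, *CONTROL_VAR is
   set to the location of i_1, *BOUND to that of n_2, and true is
   returned.  */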
/* Checks whether the condition in STMT is interesting and if so,
   records it.  */

static void
find_interesting_uses_cond (struct ivopts_data *data, gimple stmt)
{
  tree *var_p, *bound_p;
  struct iv *var_iv, *civ;

  if (!extract_cond_operands (data, stmt, &var_p, &bound_p, &var_iv, NULL))
    {
      find_interesting_uses_op (data, *var_p);
      find_interesting_uses_op (data, *bound_p);
      return;
    }

  civ = XNEW (struct iv);
  *civ = *var_iv;
  record_use (data, NULL, civ, stmt, USE_COMPARE);
}
/* Returns true if expression EXPR is obviously invariant in LOOP,
   i.e. if all its operands are defined outside of the LOOP.  LOOP
   should not be the function body.  */

bool
expr_invariant_in_loop_p (struct loop *loop, tree expr)
{
  basic_block def_bb;
  unsigned i, len;

  gcc_assert (loop_depth (loop) > 0);

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
	  && flow_bb_inside_loop_p (loop, def_bb))
	return false;

      return true;
    }

  if (!EXPR_P (expr))
    return false;

  len = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < len; i++)
    if (!expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
      return false;

  return true;
}
/* Returns true if statement STMT is obviously invariant in LOOP,
   i.e. if all its operands on the RHS are defined outside of the LOOP.
   LOOP should not be the function body.  */

bool
stmt_invariant_in_loop_p (struct loop *loop, gimple stmt)
{
  unsigned i;
  tree lhs;

  gcc_assert (loop_depth (loop) > 0);

  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); i++)
    {
      tree op = gimple_op (stmt, i);
      if (op != lhs && !expr_invariant_in_loop_p (loop, op))
	return false;
    }

  return true;
}
/* Cumulates the steps of indices into DATA and replaces their values with the
   initial ones.  Returns false when the value of the index cannot be determined.
   Callback for for_each_index.  */

struct ifs_ivopts_data
{
  struct ivopts_data *ivopts_data;
  gimple stmt;
  tree step;
};

static bool
idx_find_step (tree base, tree *idx, void *data)
{
  struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
  struct iv *iv;
  tree step, iv_base, iv_step, lbound, off;
  struct loop *loop = dta->ivopts_data->current_loop;

  /* If base is a component ref, require that the offset of the reference
     be invariant.  */
  if (TREE_CODE (base) == COMPONENT_REF)
    {
      off = component_ref_field_offset (base);
      return expr_invariant_in_loop_p (loop, off);
    }

  /* If base is array, first check whether we will be able to move the
     reference out of the loop (in order to take its address in strength
     reduction).  In order for this to work we need both lower bound
     and step to be loop invariants.  */
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      /* Moreover, for a range, the size needs to be invariant as well.  */
      if (TREE_CODE (base) == ARRAY_RANGE_REF
	  && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
	return false;

      step = array_ref_element_size (base);
      lbound = array_ref_low_bound (base);

      if (!expr_invariant_in_loop_p (loop, step)
	  || !expr_invariant_in_loop_p (loop, lbound))
	return false;
    }

  if (TREE_CODE (*idx) != SSA_NAME)
    return true;

  iv = get_iv (dta->ivopts_data, *idx);
  if (!iv)
    return false;

  /* XXX  We produce for a base of *D42 with iv->base being &x[0]
	  *&x[0], which is not folded and does not trigger the
	  ARRAY_REF path below.  */
  *idx = iv->base;

  if (integer_zerop (iv->step))
    return true;

  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      step = array_ref_element_size (base);

      /* We only handle addresses whose step is an integer constant.  */
      if (TREE_CODE (step) != INTEGER_CST)
	return false;
    }
  else
    /* The step for pointer arithmetics already is 1 byte.  */
    step = size_one_node;

  iv_base = iv->base;
  iv_step = iv->step;
  if (!convert_affine_scev (dta->ivopts_data->current_loop,
			    sizetype, &iv_base, &iv_step, dta->stmt,
			    true))
    {
      /* The index might wrap.  */
      return false;
    }

  step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
  dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);

  return true;
}
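
/* Example (illustrative only): for a reference a[i] where i is an iv with
   step 1 and the element size is 4 bytes, the index is replaced by the
   initial value of i and 4 is accumulated into DTA->step.  */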
/* Records use in index IDX.  Callback for for_each_index.  Ivopts data
   object is passed to it in DATA.  */

static bool
idx_record_use (tree base, tree *idx,
		void *vdata)
{
  struct ivopts_data *data = (struct ivopts_data *) vdata;
  find_interesting_uses_op (data, *idx);
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      find_interesting_uses_op (data, array_ref_element_size (base));
      find_interesting_uses_op (data, array_ref_low_bound (base));
    }
  return true;
}
/* If we can prove that TOP = cst * BOT for some constant cst,
   store cst to MUL and return true.  Otherwise return false.
   The returned value is always sign-extended, regardless of the
   signedness of TOP and BOT.  */

static bool
constant_multiple_of (tree top, tree bot, double_int *mul)
{
  tree mby;
  enum tree_code code;
  double_int res, p0, p1;
  unsigned precision = TYPE_PRECISION (TREE_TYPE (top));

  STRIP_NOPS (top);
  STRIP_NOPS (bot);

  if (operand_equal_p (top, bot, 0))
    {
      *mul = double_int_one;
      return true;
    }

  code = TREE_CODE (top);
  switch (code)
    {
    case MULT_EXPR:
      mby = TREE_OPERAND (top, 1);
      if (TREE_CODE (mby) != INTEGER_CST)
	return false;

      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
	return false;

      *mul = double_int_sext (double_int_mul (res, tree_to_double_int (mby)),
			      precision);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
	  || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
	return false;

      if (code == MINUS_EXPR)
	p1 = double_int_neg (p1);
      *mul = double_int_sext (double_int_add (p0, p1), precision);
      return true;

    case INTEGER_CST:
      if (TREE_CODE (bot) != INTEGER_CST)
	return false;

      p0 = double_int_sext (tree_to_double_int (top), precision);
      p1 = double_int_sext (tree_to_double_int (bot), precision);
      if (double_int_zero_p (p1))
	return false;
      *mul = double_int_sext (double_int_sdivmod (p0, p1, FLOOR_DIV_EXPR, &res),
			      precision);
      return double_int_zero_p (res);

    default:
      return false;
    }
}
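
/* Example (illustrative only): for TOP = 12 * x_1 and BOT = x_1, *MUL is
   set to 12 and true is returned; for the constants TOP = 16 and BOT = 4,
   *MUL is set to 4.  */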
/* Returns true if memory reference REF with step STEP may be unaligned.  */

static bool
may_be_unaligned_p (tree ref, tree step)
{
  tree base;
  tree base_type;
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  tree toffset;
  enum machine_mode mode;
  int unsignedp, volatilep;
  unsigned base_align;

  /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
     thus they are not misaligned.  */
  if (TREE_CODE (ref) == TARGET_MEM_REF)
    return false;

  /* The test below is basically copy of what expr.c:normal_inner_ref
     does to check whether the object must be loaded by parts when
     STRICT_ALIGNMENT is true.  */
  base = get_inner_reference (ref, &bitsize, &bitpos, &toffset, &mode,
			      &unsignedp, &volatilep, true);
  base_type = TREE_TYPE (base);
  base_align = get_object_alignment (base);
  base_align = MAX (base_align, TYPE_ALIGN (base_type));

  if (mode != BLKmode)
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (mode);

      if (base_align < mode_align
	  || (bitpos % mode_align) != 0
	  || (bitpos % BITS_PER_UNIT) != 0)
	return true;

      if (toffset
	  && (highest_pow2_factor (toffset) * BITS_PER_UNIT) < mode_align)
	return true;

      if ((highest_pow2_factor (step) * BITS_PER_UNIT) < mode_align)
	return true;
    }

  return false;
}
/* Return true if EXPR may be non-addressable.  */

bool
may_be_nonaddressable_p (tree expr)
{
  switch (TREE_CODE (expr))
    {
    case TARGET_MEM_REF:
      /* TARGET_MEM_REFs are translated directly to valid MEMs on the
	 target, thus they are always addressable.  */
      return false;

    case COMPONENT_REF:
      return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
	     || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    case VIEW_CONVERT_EXPR:
      /* This kind of view-conversions may wrap non-addressable objects
	 and make them look addressable.  After some processing the
	 non-addressability may be uncovered again, causing ADDR_EXPRs
	 of inappropriate objects to be built.  */
      if (is_gimple_reg (TREE_OPERAND (expr, 0))
	  || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
	return true;

      /* ... fall through ... */

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    CASE_CONVERT:
      return true;

    default:
      break;
    }

  return false;
}
/* Finds addresses in *OP_P inside STMT.  */

static void
find_interesting_uses_address (struct ivopts_data *data, gimple stmt, tree *op_p)
{
  tree base = *op_p, step = size_zero_node;
  struct iv *civ;
  struct ifs_ivopts_data ifs_ivopts_data;

  /* Do not play with volatile memory references.  A bit too conservative,
     perhaps, but safe.  */
  if (gimple_has_volatile_ops (stmt))
    goto fail;

  /* Ignore bitfields for now.  Not really something terribly complicated
     to handle.  TODO.  */
  if (TREE_CODE (base) == BIT_FIELD_REF)
    goto fail;

  base = unshare_expr (base);

  if (TREE_CODE (base) == TARGET_MEM_REF)
    {
      tree type = build_pointer_type (TREE_TYPE (base));
      tree astep;

      if (TMR_BASE (base)
	  && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_BASE (base));
	  if (!civ)
	    goto fail;

	  TMR_BASE (base) = civ->base;
	  step = civ->step;
	}
      if (TMR_INDEX2 (base)
	  && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_INDEX2 (base));
	  if (!civ)
	    goto fail;

	  TMR_INDEX2 (base) = civ->base;
	  step = civ->step;
	}
      if (TMR_INDEX (base)
	  && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_INDEX (base));
	  if (!civ)
	    goto fail;

	  TMR_INDEX (base) = civ->base;
	  astep = civ->step;

	  if (astep)
	    {
	      if (TMR_STEP (base))
		astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);

	      step = fold_build2 (PLUS_EXPR, type, step, astep);
	    }
	}

      if (integer_zerop (step))
	goto fail;
      base = tree_mem_ref_addr (type, base);
    }
  else
    {
      ifs_ivopts_data.ivopts_data = data;
      ifs_ivopts_data.stmt = stmt;
      ifs_ivopts_data.step = size_zero_node;
      if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
	  || integer_zerop (ifs_ivopts_data.step))
	goto fail;
      step = ifs_ivopts_data.step;

      /* Check that the base expression is addressable.  This needs
	 to be done after substituting bases of IVs into it.  */
      if (may_be_nonaddressable_p (base))
	goto fail;

      /* Moreover, on strict alignment platforms, check that it is
	 sufficiently aligned.  */
      if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
	goto fail;

      base = build_fold_addr_expr (base);

      /* Substituting bases of IVs into the base expression might
	 have caused folding opportunities.  */
      if (TREE_CODE (base) == ADDR_EXPR)
	{
	  tree *ref = &TREE_OPERAND (base, 0);
	  while (handled_component_p (*ref))
	    ref = &TREE_OPERAND (*ref, 0);
	  if (TREE_CODE (*ref) == MEM_REF)
	    {
	      tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
				      TREE_OPERAND (*ref, 0),
				      TREE_OPERAND (*ref, 1));
	      if (tem)
		*ref = tem;
	    }
	}
    }

  civ = alloc_iv (base, step);
  record_use (data, op_p, civ, stmt, USE_ADDRESS);
  return;

fail:
  for_each_index (op_p, idx_record_use, data);
}
/* Finds and records invariants used in STMT.  */

static void
find_invariants_stmt (struct ivopts_data *data, gimple stmt)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  tree op;

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);
      record_invariant (data, op, false);
    }
}
/* Finds interesting uses of induction variables in the statement STMT.  */

static void
find_interesting_uses_stmt (struct ivopts_data *data, gimple stmt)
{
  struct iv *iv;
  tree op, *lhs, *rhs;
  ssa_op_iter iter;
  use_operand_p use_p;
  enum tree_code code;

  find_invariants_stmt (data, stmt);

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      find_interesting_uses_cond (data, stmt);
      return;
    }

  if (is_gimple_assign (stmt))
    {
      lhs = gimple_assign_lhs_ptr (stmt);
      rhs = gimple_assign_rhs1_ptr (stmt);

      if (TREE_CODE (*lhs) == SSA_NAME)
	{
	  /* If the statement defines an induction variable, the uses are not
	     interesting by themselves.  */

	  iv = get_iv (data, *lhs);

	  if (iv && !integer_zerop (iv->step))
	    return;
	}

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	  && (REFERENCE_CLASS_P (*rhs)
	      || is_gimple_val (*rhs)))
	{
	  if (REFERENCE_CLASS_P (*rhs))
	    find_interesting_uses_address (data, stmt, rhs);
	  else
	    find_interesting_uses_op (data, *rhs);

	  if (REFERENCE_CLASS_P (*lhs))
	    find_interesting_uses_address (data, stmt, lhs);
	  return;
	}
      else if (TREE_CODE_CLASS (code) == tcc_comparison)
	{
	  find_interesting_uses_cond (data, stmt);
	  return;
	}

      /* TODO -- we should also handle address uses of type

	 memory = call (whatever);

	 and

	 call (memory).  */
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_bb (stmt) == data->current_loop->header)
    {
      iv = get_iv (data, PHI_RESULT (stmt));

      if (iv && !integer_zerop (iv->step))
	return;
    }

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);

      if (TREE_CODE (op) != SSA_NAME)
	continue;

      iv = get_iv (data, op);
      if (!iv)
	continue;

      find_interesting_uses_op (data, op);
    }
}
/* Finds interesting uses of induction variables outside of loops
   on loop exit edge EXIT.  */

static void
find_interesting_uses_outside (struct ivopts_data *data, edge exit)
{
  gimple phi;
  gimple_stmt_iterator psi;
  tree def;

  for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
      if (is_gimple_reg (def))
	find_interesting_uses_op (data, def);
    }
}
/* Finds uses of the induction variables that are interesting.  */

static void
find_interesting_uses (struct ivopts_data *data)
{
  basic_block bb;
  gimple_stmt_iterator bsi;
  basic_block *body = get_loop_body (data->current_loop);
  unsigned i;
  struct version_info *info;
  edge e;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Uses:\n\n");

  for (i = 0; i < data->current_loop->num_nodes; i++)
    {
      edge_iterator ei;
      bb = body[i];

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR
	    && !flow_bb_inside_loop_p (data->current_loop, e->dest))
	  find_interesting_uses_outside (data, e);

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	find_interesting_uses_stmt (data, gsi_stmt (bsi));
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	if (!is_gimple_debug (gsi_stmt (bsi)))
	  find_interesting_uses_stmt (data, gsi_stmt (bsi));
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bitmap_iterator bi;

      fprintf (dump_file, "\n");

      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
	{
	  info = ver_info (data, i);
	  if (info->inv_id)
	    {
	      fprintf (dump_file, "  ");
	      print_generic_expr (dump_file, info->name, TDF_SLIM);
	      fprintf (dump_file, " is invariant (%d)%s\n",
		       info->inv_id, info->has_nonlin_use ? "" : ", eliminable");
	    }
	}

      fprintf (dump_file, "\n");
    }

  free (body);
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  If INSIDE_ADDR
   is true, assume we are inside an address.  If TOP_COMPREF is true, assume
   we are at the top-level of the processed address.  */

static tree
strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
		unsigned HOST_WIDE_INT *offset)
{
  tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
  enum tree_code code;
  tree type, orig_type = TREE_TYPE (expr);
  unsigned HOST_WIDE_INT off0, off1, st;
  tree orig_expr = expr;

  STRIP_NOPS (expr);

  type = TREE_TYPE (expr);
  code = TREE_CODE (expr);
  *offset = 0;

  switch (code)
    {
    case INTEGER_CST:
      if (!cst_and_fits_in_hwi (expr)
	  || integer_zerop (expr))
	return orig_expr;

      *offset = int_cst_value (expr);
      return build_int_cst (orig_type, 0);

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      op0 = strip_offset_1 (op0, false, false, &off0);
      op1 = strip_offset_1 (op1, false, false, &off1);

      *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
      if (op0 == TREE_OPERAND (expr, 0)
	  && op1 == TREE_OPERAND (expr, 1))
	return orig_expr;

      if (integer_zerop (op1))
	expr = op0;
      else if (integer_zerop (op0))
	{
	  if (code == MINUS_EXPR)
	    expr = fold_build1 (NEGATE_EXPR, type, op1);
	  else
	    expr = op1;
	}
      else
	expr = fold_build2 (code, type, op0, op1);

      return fold_convert (orig_type, expr);

    case MULT_EXPR:
      op1 = TREE_OPERAND (expr, 1);
      if (!cst_and_fits_in_hwi (op1))
	return orig_expr;

      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, false, false, &off0);
      if (op0 == TREE_OPERAND (expr, 0))
	return orig_expr;

      *offset = off0 * int_cst_value (op1);
      if (integer_zerop (op0))
	expr = op0;
      else
	expr = fold_build2 (MULT_EXPR, type, op0, op1);

      return fold_convert (orig_type, expr);

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      if (!inside_addr)
	return orig_expr;

      step = array_ref_element_size (expr);
      if (!cst_and_fits_in_hwi (step))
	break;

      st = int_cst_value (step);
      op1 = TREE_OPERAND (expr, 1);
      op1 = strip_offset_1 (op1, false, false, &off1);
      *offset = off1 * st;

      if (top_compref
	  && integer_zerop (op1))
	{
	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	  *offset += off0;
	  return op0;
	}
      break;

    case COMPONENT_REF:
      if (!inside_addr)
	return orig_expr;

      tmp = component_ref_field_offset (expr);
      if (top_compref
	  && cst_and_fits_in_hwi (tmp))
	{
	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	  *offset = off0 + int_cst_value (tmp);
	  return op0;
	}
      break;

    case ADDR_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, true, true, &off0);
      *offset += off0;

      if (op0 == TREE_OPERAND (expr, 0))
	return orig_expr;

      expr = build_fold_addr_expr (op0);
      return fold_convert (orig_type, expr);

    case MEM_REF:
      /* ???  Offset operand?  */
      inside_addr = false;
      break;

    default:
      return orig_expr;
    }

  /* Default handling of expressions for that we want to recurse into
     the first operand.  */
  op0 = TREE_OPERAND (expr, 0);
  op0 = strip_offset_1 (op0, inside_addr, false, &off0);
  *offset += off0;

  if (op0 == TREE_OPERAND (expr, 0)
      && (!op1 || op1 == TREE_OPERAND (expr, 1)))
    return orig_expr;

  expr = copy_node (expr);
  TREE_OPERAND (expr, 0) = op0;
  if (op1)
    TREE_OPERAND (expr, 1) = op1;

  /* Inside address, we might strip the top level component references,
     thus changing type of the expression.  Handling of ADDR_EXPR
     will fix that.  */
  expr = fold_convert (orig_type, expr);

  return expr;
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  */

static tree
strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
{
  return strip_offset_1 (expr, false, false, offset);
}
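
/* Example (illustrative only): for EXPR = &a[5], where a is an array of
   4-byte integers, the constant offset 20 is stored to *OFFSET and the
   address of the array itself is returned; for EXPR = p + 8, p is returned
   and *OFFSET is 8.  */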
/* Returns variant of TYPE that can be used as base for different uses.
   We return unsigned type with the same precision, which avoids problems
   with overflows.  */

static tree
generic_type_for (tree type)
{
  if (POINTER_TYPE_P (type))
    return unsigned_type_for (type);

  if (TYPE_UNSIGNED (type))
    return type;

  return unsigned_type_for (type);
}
/* Records invariants in *EXPR_P.  Callback for walk_tree.  DATA contains
   the bitmap to that we should store it.  */

static struct ivopts_data *fd_ivopts_data;
static tree
find_depends (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
{
  bitmap *depends_on = (bitmap *) data;
  struct version_info *info;

  if (TREE_CODE (*expr_p) != SSA_NAME)
    return NULL_TREE;
  info = name_info (fd_ivopts_data, *expr_p);

  if (!info->inv_id || info->has_nonlin_use)
    return NULL_TREE;

  if (!*depends_on)
    *depends_on = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*depends_on, info->inv_id);

  return NULL_TREE;
}
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  If both BASE and STEP are NULL, we add a pseudocandidate for the
   replacement of the final value of the iv by a direct computation.  */

static struct iv_cand *
add_candidate_1 (struct ivopts_data *data,
		 tree base, tree step, bool important, enum iv_position pos,
		 struct iv_use *use, gimple incremented_at)
{
  unsigned i;
  struct iv_cand *cand = NULL;
  tree type, orig_type;

  /* For non-original variables, make sure their values are computed in a type
     that does not invoke undefined behavior on overflows (since in general,
     we cannot prove that these induction variables are non-wrapping).  */
  if (pos != IP_ORIGINAL)
    {
      orig_type = TREE_TYPE (base);
      type = generic_type_for (orig_type);
      if (type != orig_type)
	{
	  base = fold_convert (type, base);
	  step = fold_convert (type, step);
	}
    }

  for (i = 0; i < n_iv_cands (data); i++)
    {
      cand = iv_cand (data, i);

      if (cand->pos != pos)
	continue;

      if (cand->incremented_at != incremented_at
	  || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	      && cand->ainc_use != use))
	continue;

      if (!cand->iv)
	{
	  if (!base && !step)
	    break;

	  continue;
	}

      if (!base && !step)
	continue;

      if (operand_equal_p (base, cand->iv->base, 0)
	  && operand_equal_p (step, cand->iv->step, 0)
	  && (TYPE_PRECISION (TREE_TYPE (base))
	      == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
	break;
    }

  if (i == n_iv_cands (data))
    {
      cand = XCNEW (struct iv_cand);
      cand->id = i;

      if (!base && !step)
	cand->iv = NULL;
      else
	cand->iv = alloc_iv (base, step);

      cand->pos = pos;
      if (pos != IP_ORIGINAL && cand->iv)
	{
	  cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
	  cand->var_after = cand->var_before;
	}
      cand->important = important;
      cand->incremented_at = incremented_at;
      VEC_safe_push (iv_cand_p, heap, data->iv_candidates, cand);

      if (step
	  && TREE_CODE (step) != INTEGER_CST)
	{
	  fd_ivopts_data = data;
	  walk_tree (&step, find_depends, &cand->depends_on, NULL);
	}

      if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	cand->ainc_use = use;
      else
	cand->ainc_use = NULL;

      if (dump_file && (dump_flags & TDF_DETAILS))
	dump_cand (dump_file, cand);
    }

  if (important && !cand->important)
    {
      cand->important = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Candidate %d is important\n", cand->id);
    }

  if (use)
    {
      bitmap_set_bit (use->related_cands, i);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Candidate %d is related to use %d\n",
		 cand->id, use->id);
    }

  return cand;
}
/* Returns true if incrementing the induction variable at the end of the LOOP
   is allowed.

   The purpose is to avoid splitting latch edge with a biv increment, thus
   creating a jump, possibly confusing other optimization passes and leaving
   less freedom to scheduler.  So we allow IP_END_POS only if IP_NORMAL_POS
   is not available (so we do not have a better alternative), or if the latch
   edge is already nonempty.  */

static bool
allow_ip_end_pos_p (struct loop *loop)
{
  if (!ip_normal_pos (loop))
    return true;

  if (!empty_block_p (ip_end_pos (loop)))
    return true;

  return false;
}
/* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
   Important field is set to IMPORTANT.  */

static void
add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
			bool important, struct iv_use *use)
{
  basic_block use_bb = gimple_bb (use->stmt);
  enum machine_mode mem_mode;
  unsigned HOST_WIDE_INT cstepi;

  /* If we insert the increment in any position other than the standard
     ones, we must ensure that it is incremented once per iteration.
     It must not be in an inner nested loop, or one side of an if
     statement.  */
  if (use_bb->loop_father != data->current_loop
      || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
      || stmt_could_throw_p (use->stmt)
      || !cst_and_fits_in_hwi (step))
    return;

  cstepi = int_cst_value (step);

  mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
  if ((HAVE_PRE_INCREMENT && GET_MODE_SIZE (mem_mode) == cstepi)
      || (HAVE_PRE_DECREMENT && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      enum tree_code code = MINUS_EXPR;
      tree new_base;
      tree new_step = step;

      if (POINTER_TYPE_P (TREE_TYPE (base)))
	{
	  new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  code = POINTER_PLUS_EXPR;
	}
      else
	new_step = fold_convert (TREE_TYPE (base), new_step);
      new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
      add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
		       use->stmt);
    }
  if ((HAVE_POST_INCREMENT && GET_MODE_SIZE (mem_mode) == cstepi)
      || (HAVE_POST_DECREMENT && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
		       use->stmt);
    }
}
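
/* Example (illustrative only): on a target with post-increment addressing,
   a 4-byte memory access whose address iv has step 4 gets an IP_AFTER_USE
   candidate, which allows the access and the iv increment to combine into
   a single post-increment address.  */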
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  The candidate computation is scheduled on all available positions.  */

static void
add_candidate (struct ivopts_data *data,
	       tree base, tree step, bool important, struct iv_use *use)
{
  if (ip_normal_pos (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_NORMAL, use, NULL);
  if (ip_end_pos (data->current_loop)
      && allow_ip_end_pos_p (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_END, use, NULL);

  if (use != NULL && use->type == USE_ADDRESS)
    add_autoinc_candidates (data, base, step, important, use);
}
/* Add a standard "0 + 1 * iteration" iv candidate for a
   type with SIZE bits.  */

static void
add_standard_iv_candidates_for_size (struct ivopts_data *data,
				     unsigned int size)
{
  tree type = lang_hooks.types.type_for_size (size, true);
  add_candidate (data, build_int_cst (type, 0), build_int_cst (type, 1),
		 true, NULL);
}
/* Adds standard iv candidates.  */

static void
add_standard_iv_candidates (struct ivopts_data *data)
{
  add_standard_iv_candidates_for_size (data, INT_TYPE_SIZE);

  /* The same for a double-integer type if it is still fast enough.  */
  if (BITS_PER_WORD >= INT_TYPE_SIZE * 2)
    add_standard_iv_candidates_for_size (data, INT_TYPE_SIZE * 2);
}
/* Adds candidates based on the old induction variable IV.  */

static void
add_old_iv_candidates (struct ivopts_data *data, struct iv *iv)
{
  gimple phi;
  tree def;
  struct iv_cand *cand;

  add_candidate (data, iv->base, iv->step, true, NULL);

  /* The same, but with initial value zero.  */
  if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
    add_candidate (data, size_int (0), iv->step, true, NULL);
  else
    add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
		   iv->step, true, NULL);

  phi = SSA_NAME_DEF_STMT (iv->ssa_name);
  if (gimple_code (phi) == GIMPLE_PHI)
    {
      /* Additionally record the possibility of leaving the original iv
	 untouched.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
      cand = add_candidate_1 (data,
			      iv->base, iv->step, true, IP_ORIGINAL, NULL,
			      SSA_NAME_DEF_STMT (def));
      cand->var_before = iv->ssa_name;
      cand->var_after = def;
    }
}
/* Adds candidates based on the old induction variables.  */

static void
add_old_ivs_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv *iv;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      iv = ver_info (data, i)->iv;
      if (iv && iv->biv_p && !integer_zerop (iv->step))
	add_old_iv_candidates (data, iv);
    }
}
/* Adds candidates based on the value of the induction variable IV and USE.  */

static void
add_iv_value_candidates (struct ivopts_data *data,
                         struct iv *iv, struct iv_use *use)
{
  unsigned HOST_WIDE_INT offset;
  tree base;
  tree basetype;

  add_candidate (data, iv->base, iv->step, false, use);

  /* The same, but with initial value zero.  Make such variable important,
     since it is generic enough so that possibly many uses may be based
     on it.  */
  basetype = TREE_TYPE (iv->base);
  if (POINTER_TYPE_P (basetype))
    basetype = sizetype;
  add_candidate (data, build_int_cst (basetype, 0),
                 iv->step, true, use);

  /* Third, try removing the constant offset.  Make sure to even
     add a candidate for &a[0] vs. (T *)&a.  */
  base = strip_offset (iv->base, &offset);
  if (offset
      || base != iv->base)
    add_candidate (data, base, iv->step, false, use);
}
/* Adds candidates based on the uses.  */

static void
add_derived_ivs_candidates (struct ivopts_data *data)
{
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);

      if (!use)
        continue;

      switch (use->type)
        {
        case USE_NONLINEAR_EXPR:
        case USE_COMPARE:
        case USE_ADDRESS:
          /* Just add the ivs based on the value of the iv used here.  */
          add_iv_value_candidates (data, use->iv, use);
          break;

        default:
          gcc_unreachable ();
        }
    }
}
/* Record important candidates and add them to related_cands bitmaps
   if needed.  */

static void
record_important_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv_use *use;

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      if (cand->important)
        bitmap_set_bit (data->important_candidates, i);
    }

  data->consider_all_candidates = (n_iv_cands (data)
                                   <= CONSIDER_ALL_CANDIDATES_BOUND);

  if (data->consider_all_candidates)
    {
      /* We will not need "related_cands" bitmaps in this case,
         so release them to decrease peak memory consumption.  */
      for (i = 0; i < n_iv_uses (data); i++)
        {
          use = iv_use (data, i);
          BITMAP_FREE (use->related_cands);
        }
    }
  else
    {
      /* Add important candidates to the related_cands bitmaps.  */
      for (i = 0; i < n_iv_uses (data); i++)
        bitmap_ior_into (iv_use (data, i)->related_cands,
                         data->important_candidates);
    }
}
/* Allocates the data structure mapping the (use, candidate) pairs to costs.
   If consider_all_candidates is true, we use a two-dimensional array, otherwise
   we allocate a simple list to every use.  */

static void
alloc_use_cost_map (struct ivopts_data *data)
{
  unsigned i, size, s, j;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);
      bitmap_iterator bi;

      if (data->consider_all_candidates)
        size = n_iv_cands (data);
      else
        {
          s = 0;
          EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
            {
              s++;
            }

          /* Round up to the nearest power of two, so that taking the
             modulus by it is fast.  */
          for (size = 1; size < s; size <<= 1)
            continue;
        }

      use->n_map_members = size;
      use->cost_map = XCNEWVEC (struct cost_pair, size);
    }
}
/* Returns description of computation cost of expression whose runtime
   cost is RUNTIME and complexity corresponds to COMPLEXITY.  */

static comp_cost
new_cost (unsigned runtime, unsigned complexity)
{
  comp_cost cost;

  cost.cost = runtime;
  cost.complexity = complexity;

  return cost;
}
/* Adds costs COST1 and COST2.  */

static comp_cost
add_costs (comp_cost cost1, comp_cost cost2)
{
  cost1.cost += cost2.cost;
  cost1.complexity += cost2.complexity;

  return cost1;
}

/* Subtracts costs COST1 and COST2.  */

static comp_cost
sub_costs (comp_cost cost1, comp_cost cost2)
{
  cost1.cost -= cost2.cost;
  cost1.complexity -= cost2.complexity;

  return cost1;
}

/* Returns a negative number if COST1 < COST2, a positive number if
   COST1 > COST2, and 0 if COST1 = COST2.  */

static int
compare_costs (comp_cost cost1, comp_cost cost2)
{
  if (cost1.cost == cost2.cost)
    return cost1.complexity - cost2.complexity;

  return cost1.cost - cost2.cost;
}

/* Returns true if COST is infinite.  */

static bool
infinite_cost_p (comp_cost cost)
{
  return cost.cost == INFTY;
}
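
/* For illustration (not part of the pass): costs combine componentwise
   and compare by estimated runtime cost first, e.g.

     comp_cost a = new_cost (4, 1);    // 4 cycles, 1 elementary part
     comp_cost b = new_cost (4, 2);
     comp_cost c = add_costs (a, b);   // c.cost == 8, c.complexity == 3
     int r = compare_costs (a, b);     // r < 0: equal runtime, a is simpler

   so complexity only breaks ties between candidates whose estimated
   runtime costs are equal.  */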
/* Sets cost of (USE, CANDIDATE) pair to COST and record that it depends
   on invariants DEPENDS_ON and that the value used in expressing it
   is VALUE, and in case of iv elimination the comparison operator is COMP.  */

static void
set_use_iv_cost (struct ivopts_data *data,
                 struct iv_use *use, struct iv_cand *cand,
                 comp_cost cost, bitmap depends_on, tree value,
                 enum tree_code comp, int inv_expr_id)
{
  unsigned i, s;

  if (infinite_cost_p (cost))
    {
      BITMAP_FREE (depends_on);
      return;
    }

  if (data->consider_all_candidates)
    {
      use->cost_map[cand->id].cand = cand;
      use->cost_map[cand->id].cost = cost;
      use->cost_map[cand->id].depends_on = depends_on;
      use->cost_map[cand->id].value = value;
      use->cost_map[cand->id].comp = comp;
      use->cost_map[cand->id].inv_expr_id = inv_expr_id;
      return;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (use->n_map_members - 1);
  for (i = s; i < use->n_map_members; i++)
    if (!use->cost_map[i].cand)
      goto found;
  for (i = 0; i < s; i++)
    if (!use->cost_map[i].cand)
      goto found;

  gcc_unreachable ();

found:
  use->cost_map[i].cand = cand;
  use->cost_map[i].cost = cost;
  use->cost_map[i].depends_on = depends_on;
  use->cost_map[i].value = value;
  use->cost_map[i].comp = comp;
  use->cost_map[i].inv_expr_id = inv_expr_id;
}
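
/* For illustration (not part of the pass): because n_map_members is a
   power of two, the initial probe position for a candidate is computed
   with a mask instead of a division, and collisions are resolved by
   linear probing that wraps around, e.g. with n_map_members == 8:

     s = cand->id & 7;   // same as cand->id % 8, but cheaper
     // probe slots s, s+1, ..., 7, then 0, 1, ..., s-1

   get_use_iv_cost below walks the same probe sequence, so a pair that
   was stored is always found again.  */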
/* Gets cost of (USE, CANDIDATE) pair.  */

static struct cost_pair *
get_use_iv_cost (struct ivopts_data *data, struct iv_use *use,
                 struct iv_cand *cand)
{
  unsigned i, s;
  struct cost_pair *ret;

  if (!cand)
    return NULL;

  if (data->consider_all_candidates)
    {
      ret = use->cost_map + cand->id;
      if (!ret->cand)
        return NULL;

      return ret;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (use->n_map_members - 1);
  for (i = s; i < use->n_map_members; i++)
    if (use->cost_map[i].cand == cand)
      return use->cost_map + i;

  for (i = 0; i < s; i++)
    if (use->cost_map[i].cand == cand)
      return use->cost_map + i;

  return NULL;
}
/* Returns estimate on cost of computing SEQ.  */

static unsigned
seq_cost (rtx seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += set_src_cost (SET_SRC (set), speed);
      else
        cost++;
    }

  return cost;
}
/* Produce DECL_RTL for object obj so it looks like it is stored in memory.  */

static rtx
produce_memory_decl_rtl (tree obj, int *regno)
{
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  rtx x;

  gcc_assert (obj);
  if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
    {
      const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
      x = gen_rtx_SYMBOL_REF (address_mode, name);
      SET_SYMBOL_REF_DECL (x, obj);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
      targetm.encode_section_info (obj, x, true);
    }
  else
    {
      x = gen_raw_REG (address_mode, (*regno)++);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
    }

  return x;
}
/* Prepares decl_rtl for variables referred in *EXPR_P.  Callback for
   walk_tree.  DATA contains the actual fake register number.  */

static tree
prepare_decl_rtl (tree *expr_p, int *ws, void *data)
{
  tree obj = NULL_TREE;
  rtx x = NULL_RTX;
  int *regno = (int *) data;

  switch (TREE_CODE (*expr_p))
    {
    case ADDR_EXPR:
      for (expr_p = &TREE_OPERAND (*expr_p, 0);
           handled_component_p (*expr_p);
           expr_p = &TREE_OPERAND (*expr_p, 0))
        continue;
      obj = *expr_p;
      if (DECL_P (obj) && !DECL_RTL_SET_P (obj))
        x = produce_memory_decl_rtl (obj, regno);
      break;

    case SSA_NAME:
      *ws = 0;
      obj = SSA_NAME_VAR (*expr_p);
      if (!DECL_RTL_SET_P (obj))
        x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
      break;

    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *ws = 0;
      obj = *expr_p;

      if (DECL_RTL_SET_P (obj))
        break;

      if (DECL_MODE (obj) == BLKmode)
        x = produce_memory_decl_rtl (obj, regno);
      else
        x = gen_raw_REG (DECL_MODE (obj), (*regno)++);

      break;

    default:
      break;
    }

  if (x)
    {
      VEC_safe_push (tree, heap, decl_rtl_to_reset, obj);
      SET_DECL_RTL (obj, x);
    }

  return NULL_TREE;
}
/* Determines cost of the computation of EXPR.  */

static unsigned
computation_cost (tree expr, bool speed)
{
  rtx seq, rslt;
  tree type = TREE_TYPE (expr);
  unsigned cost;
  /* Avoid using hard regs in ways which may be unsupported.  */
  int regno = LAST_VIRTUAL_REGISTER + 1;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  enum node_frequency real_frequency = node->frequency;

  node->frequency = NODE_FREQUENCY_NORMAL;
  crtl->maybe_hot_insn_p = speed;
  walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
  start_sequence ();
  rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
  seq = get_insns ();
  end_sequence ();
  default_rtl_profile ();
  node->frequency = real_frequency;

  cost = seq_cost (seq, speed);
  if (MEM_P (rslt))
    cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
                          TYPE_ADDR_SPACE (type), speed);
  else if (!REG_P (rslt))
    cost += set_src_cost (rslt, speed);

  return cost;
}
/* Returns variable containing the value of candidate CAND at statement AT.  */

static tree
var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple stmt)
{
  if (stmt_after_increment (loop, cand, stmt))
    return cand->var_after;
  else
    return cand->var_before;
}
/* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
   same precision that is at least as wide as the precision of TYPE, stores
   BA to A and BB to B, and returns the type of BA.  Otherwise, returns the
   type of A and B.  */

static tree
determine_common_wider_type (tree *a, tree *b)
{
  tree wider_type = NULL;
  tree suba, subb;
  tree atype = TREE_TYPE (*a);

  if (CONVERT_EXPR_P (*a))
    {
      suba = TREE_OPERAND (*a, 0);
      wider_type = TREE_TYPE (suba);
      if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
        return atype;
    }
  else
    return atype;

  if (CONVERT_EXPR_P (*b))
    {
      subb = TREE_OPERAND (*b, 0);
      if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
        return atype;
    }
  else
    return atype;

  *a = suba;
  *b = subb;
  return wider_type;
}
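
/* For illustration (not part of the pass): if A and B are, say,

     (unsigned int) a64   and   (unsigned int) b64

   with a64 and b64 of the same 64-bit type, the function strips both
   conversions and returns the 64-bit type, so that A - B can be folded
   in the wider type before the result is cast back.  If only one
   operand is such a conversion, or the inner precisions differ, it
   returns TREE_TYPE (*A) and leaves A and B untouched.  */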
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in a decomposed
   form into AFF.  Returns false if USE cannot be expressed using CAND.  */

static bool
get_computation_aff (struct loop *loop,
                     struct iv_use *use, struct iv_cand *cand, gimple at,
                     struct affine_tree_combination *aff)
{
  tree ubase = use->iv->base;
  tree ustep = use->iv->step;
  tree cbase = cand->iv->base;
  tree cstep = cand->iv->step, cstep_common;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
  tree common_type, var;
  tree uutype;
  aff_tree cbase_aff, var_aff;
  double_int rat;

  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    {
      /* We do not have a precision to express the values of use.  */
      return false;
    }

  var = var_at_stmt (loop, cand, at);
  uutype = unsigned_type_for (utype);

  /* If the conversion is not noop, perform it.  */
  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      cstep = fold_convert (uutype, cstep);
      cbase = fold_convert (uutype, cbase);
      var = fold_convert (uutype, var);
    }

  if (!constant_multiple_of (ustep, cstep, &rat))
    return false;

  /* In case both UBASE and CBASE are shortened to UUTYPE from some common
     type, we achieve better folding by computing their difference in this
     wider type, and cast the result to UUTYPE.  We do not need to worry about
     overflows, as all the arithmetics will in the end be performed in UUTYPE
     anyway.  */
  common_type = determine_common_wider_type (&ubase, &cbase);

  /* use = ubase - ratio * cbase + ratio * var.  */
  tree_to_aff_combination (ubase, common_type, aff);
  tree_to_aff_combination (cbase, common_type, &cbase_aff);
  tree_to_aff_combination (var, uutype, &var_aff);

  /* We need to shift the value if we are after the increment.  */
  if (stmt_after_increment (loop, cand, at))
    {
      aff_tree cstep_aff;

      if (common_type != uutype)
        cstep_common = fold_convert (common_type, cstep);
      else
        cstep_common = cstep;

      tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
      aff_combination_add (&cbase_aff, &cstep_aff);
    }

  aff_combination_scale (&cbase_aff, double_int_neg (rat));
  aff_combination_add (aff, &cbase_aff);
  if (common_type != uutype)
    aff_combination_convert (aff, uutype);

  aff_combination_scale (&var_aff, rat);
  aff_combination_add (aff, &var_aff);

  return true;
}
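
/* For illustration (not part of the pass): for a use with
   ubase = &a[0], ustep = 4 based on a candidate with cbase = 0,
   cstep = 1, we would get ratio = 4 and the decomposed form

     use = ubase - 4 * cbase + 4 * var = &a[0] + 4 * var

   with one extra cstep added to cbase when the use is placed after the
   candidate's increment.  */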
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The computation is unshared.  */

static tree
get_computation_at (struct loop *loop,
                    struct iv_use *use, struct iv_cand *cand, gimple at)
{
  aff_tree aff;
  tree type = TREE_TYPE (use->iv->base);

  if (!get_computation_aff (loop, use, cand, at, &aff))
    return NULL_TREE;
  unshare_aff_combination (&aff);
  return fold_convert (type, aff_combination_to_tree (&aff));
}
/* Determines the expression by which USE is expressed from induction variable
   CAND in LOOP.  The computation is unshared.  */

static tree
get_computation (struct loop *loop, struct iv_use *use, struct iv_cand *cand)
{
  return get_computation_at (loop, use, cand, use->stmt);
}
/* Adjust the cost COST for being in loop setup rather than loop body.
   If we're optimizing for space, the loop setup overhead is constant;
   if we're optimizing for speed, amortize it over the per-iteration cost.  */

static unsigned
adjust_setup_cost (struct ivopts_data *data, unsigned cost)
{
  if (cost == INFTY)
    return cost;
  else if (optimize_loop_for_speed_p (data->current_loop))
    return cost / avg_loop_niter (data->current_loop);
  else
    return cost;
}
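
/* For illustration (not part of the pass): with the default expected
   iteration count of AVG_LOOP_NITER == 5, a setup cost of 10 incurred
   outside the loop is accounted as 10 / 5 == 2 when optimizing for
   speed, and as the full 10 when optimizing for size.  */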
/* Returns cost of addition in MODE.  */

static unsigned
add_cost (enum machine_mode mode, bool speed)
{
  static unsigned costs[NUM_MACHINE_MODES];
  rtx seq;
  unsigned cost;

  if (costs[mode])
    return costs[mode];

  start_sequence ();
  force_operand (gen_rtx_fmt_ee (PLUS, mode,
                                 gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1),
                                 gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 2)),
                 NULL_RTX);
  seq = get_insns ();
  end_sequence ();

  cost = seq_cost (seq, speed);
  if (!cost)
    cost = 1;

  costs[mode] = cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Addition in %s costs %d\n",
             GET_MODE_NAME (mode), cost);
  return cost;
}
/* Entry in a hashtable of already known costs for multiplication.  */
struct mbc_entry
{
  HOST_WIDE_INT cst;            /* The constant to multiply by.  */
  enum machine_mode mode;       /* In mode.  */
  unsigned cost;                /* The cost.  */
};

/* Counts hash value for the ENTRY.  */

static hashval_t
mbc_entry_hash (const void *entry)
{
  const struct mbc_entry *e = (const struct mbc_entry *) entry;

  return 57 * (hashval_t) e->mode + (hashval_t) (e->cst % 877);
}

/* Compares the hash table entries ENTRY1 and ENTRY2.  */

static int
mbc_entry_eq (const void *entry1, const void *entry2)
{
  const struct mbc_entry *e1 = (const struct mbc_entry *) entry1;
  const struct mbc_entry *e2 = (const struct mbc_entry *) entry2;

  return (e1->mode == e2->mode
          && e1->cst == e2->cst);
}
/* Returns cost of multiplication by constant CST in MODE.  */

unsigned
multiply_by_cost (HOST_WIDE_INT cst, enum machine_mode mode, bool speed)
{
  static htab_t costs;
  struct mbc_entry **cached, act;
  rtx seq;
  unsigned cost;

  if (!costs)
    costs = htab_create (100, mbc_entry_hash, mbc_entry_eq, free);

  act.mode = mode;
  act.cst = cst;
  cached = (struct mbc_entry **) htab_find_slot (costs, &act, INSERT);
  if (*cached)
    return (*cached)->cost;

  *cached = XNEW (struct mbc_entry);
  (*cached)->mode = mode;
  (*cached)->cst = cst;

  start_sequence ();
  expand_mult (mode, gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1),
               gen_int_mode (cst, mode), NULL_RTX, 0);
  seq = get_insns ();
  end_sequence ();

  cost = seq_cost (seq, speed);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Multiplication by %d in %s costs %d\n",
             (int) cst, GET_MODE_NAME (mode), cost);

  (*cached)->cost = cost;

  return cost;
}
/* Returns true if multiplying by RATIO is allowed in an address.  Test the
   validity for a memory reference accessing memory of mode MODE in
   address space AS.  */

DEF_VEC_P (sbitmap);
DEF_VEC_ALLOC_P (sbitmap, heap);

bool
multiplier_allowed_in_address_p (HOST_WIDE_INT ratio, enum machine_mode mode,
                                 addr_space_t as)
{
#define MAX_RATIO 128
  unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mode;
  static VEC (sbitmap, heap) *valid_mult_list;
  sbitmap valid_mult;

  if (data_index >= VEC_length (sbitmap, valid_mult_list))
    VEC_safe_grow_cleared (sbitmap, heap, valid_mult_list, data_index + 1);

  valid_mult = VEC_index (sbitmap, valid_mult_list, data_index);
  if (!valid_mult)
    {
      enum machine_mode address_mode = targetm.addr_space.address_mode (as);
      rtx reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
      rtx addr;
      HOST_WIDE_INT i;

      valid_mult = sbitmap_alloc (2 * MAX_RATIO + 1);
      sbitmap_zero (valid_mult);
      addr = gen_rtx_fmt_ee (MULT, address_mode, reg1, NULL_RTX);
      for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
        {
          XEXP (addr, 1) = gen_int_mode (i, address_mode);
          if (memory_address_addr_space_p (mode, addr, as))
            SET_BIT (valid_mult, i + MAX_RATIO);
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  allowed multipliers:");
          for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
            if (TEST_BIT (valid_mult, i + MAX_RATIO))
              fprintf (dump_file, " %d", (int) i);
          fprintf (dump_file, "\n");
          fprintf (dump_file, "\n");
        }

      VEC_replace (sbitmap, valid_mult_list, data_index, valid_mult);
    }

  if (ratio > MAX_RATIO || ratio < -MAX_RATIO)
    return false;

  return TEST_BIT (valid_mult, ratio + MAX_RATIO);
}
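
/* For illustration (not part of the pass): on a target whose addressing
   modes scale the index register by 1, 2, 4 or 8 (e.g. x86), the bitmap
   computed above would answer

     multiplier_allowed_in_address_p (4, SImode, ADDR_SPACE_GENERIC)  -> true
     multiplier_allowed_in_address_p (3, SImode, ADDR_SPACE_GENERIC)  -> false

   and the result is cached per (address space, mode) pair, so the
   target hook is queried only once for each.  */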
/* Returns cost of address in shape symbol + var + OFFSET + RATIO * index.
   If SYMBOL_PRESENT is false, symbol is omitted.  If VAR_PRESENT is false,
   variable is omitted.  Compute the cost for a memory reference that accesses
   a memory location of mode MEM_MODE in address space AS.

   MAY_AUTOINC is set to true if the autoincrement (increasing index by
   size of MEM_MODE / RATIO) is available.  To make this determination, we
   look at the size of the increment to be made, which is given in CSTEP.
   CSTEP may be zero if the step is unknown.
   STMT_AFTER_INC is true iff the statement we're looking at is after the
   increment of the original biv.

   TODO -- there must be some better way.  This all is quite crude.  */

typedef struct
{
  HOST_WIDE_INT min_offset, max_offset;
  unsigned costs[2][2][2][2];
} *address_cost_data;

DEF_VEC_P (address_cost_data);
DEF_VEC_ALLOC_P (address_cost_data, heap);

static comp_cost
get_address_cost (bool symbol_present, bool var_present,
                  unsigned HOST_WIDE_INT offset, HOST_WIDE_INT ratio,
                  HOST_WIDE_INT cstep, enum machine_mode mem_mode,
                  addr_space_t as, bool speed,
                  bool stmt_after_inc, bool *may_autoinc)
{
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  static VEC(address_cost_data, heap) *address_cost_data_list;
  unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mem_mode;
  address_cost_data data;
  static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
  static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
  unsigned cost, acost, complexity;
  bool offset_p, ratio_p, autoinc;
  HOST_WIDE_INT s_offset, autoinc_offset, msize;
  unsigned HOST_WIDE_INT mask;
  unsigned bits;

  if (data_index >= VEC_length (address_cost_data, address_cost_data_list))
    VEC_safe_grow_cleared (address_cost_data, heap, address_cost_data_list,
                           data_index + 1);

  data = VEC_index (address_cost_data, address_cost_data_list, data_index);
  if (!data)
    {
      HOST_WIDE_INT i;
      HOST_WIDE_INT rat, off = 0;
      int old_cse_not_expected, width;
      unsigned sym_p, var_p, off_p, rat_p, add_c;
      rtx seq, addr, base;
      rtx reg0, reg1;

      data = (address_cost_data) xcalloc (1, sizeof (*data));

      reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);

      width = GET_MODE_BITSIZE (address_mode) - 1;
      if (width > (HOST_BITS_PER_WIDE_INT - 1))
        width = HOST_BITS_PER_WIDE_INT - 1;
      addr = gen_rtx_fmt_ee (PLUS, address_mode, reg1, NULL_RTX);

      for (i = width; i >= 0; i--)
        {
          off = -((HOST_WIDE_INT) 1 << i);
          XEXP (addr, 1) = gen_int_mode (off, address_mode);
          if (memory_address_addr_space_p (mem_mode, addr, as))
            break;
        }
      data->min_offset = (i == -1? 0 : off);

      for (i = width; i >= 0; i--)
        {
          off = ((HOST_WIDE_INT) 1 << i) - 1;
          XEXP (addr, 1) = gen_int_mode (off, address_mode);
          if (memory_address_addr_space_p (mem_mode, addr, as))
            break;
        }
      if (i == -1)
        off = 0;
      data->max_offset = off;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "get_address_cost:\n");
          fprintf (dump_file, "  min offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
                   GET_MODE_NAME (mem_mode),
                   data->min_offset);
          fprintf (dump_file, "  max offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
                   GET_MODE_NAME (mem_mode),
                   data->max_offset);
        }

      rat = 1;
      for (i = 2; i <= MAX_RATIO; i++)
        if (multiplier_allowed_in_address_p (i, mem_mode, as))
          {
            rat = i;
            break;
          }

      /* Compute the cost of various addressing modes.  */
      acost = 0;
      reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
      reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);

      if (HAVE_PRE_DECREMENT)
        {
          addr = gen_rtx_PRE_DEC (address_mode, reg0);
          has_predec[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (HAVE_POST_DECREMENT)
        {
          addr = gen_rtx_POST_DEC (address_mode, reg0);
          has_postdec[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (HAVE_PRE_INCREMENT)
        {
          addr = gen_rtx_PRE_INC (address_mode, reg0);
          has_preinc[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      if (HAVE_POST_INCREMENT)
        {
          addr = gen_rtx_POST_INC (address_mode, reg0);
          has_postinc[mem_mode]
            = memory_address_addr_space_p (mem_mode, addr, as);
        }
      for (i = 0; i < 16; i++)
        {
          sym_p = i & 1;
          var_p = (i >> 1) & 1;
          off_p = (i >> 2) & 1;
          rat_p = (i >> 3) & 1;

          addr = reg0;
          if (rat_p)
            addr = gen_rtx_fmt_ee (MULT, address_mode, addr,
                                   gen_int_mode (rat, address_mode));

          if (var_p)
            addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, reg1);

          if (sym_p)
            {
              base = gen_rtx_SYMBOL_REF (address_mode, ggc_strdup (""));
              /* ??? We can run into trouble with some backends by presenting
                 it with symbols which haven't been properly passed through
                 targetm.encode_section_info.  By setting the local bit, we
                 enhance the probability of things working.  */
              SYMBOL_REF_FLAGS (base) = SYMBOL_FLAG_LOCAL;

              if (off_p)
                base = gen_rtx_fmt_e (CONST, address_mode,
                                      gen_rtx_fmt_ee
                                        (PLUS, address_mode, base,
                                         gen_int_mode (off, address_mode)));
            }
          else if (off_p)
            base = gen_int_mode (off, address_mode);
          else
            base = NULL_RTX;

          if (base)
            addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, base);

          start_sequence ();
          /* To avoid splitting addressing modes, pretend that no cse will
             follow.  */
          old_cse_not_expected = cse_not_expected;
          cse_not_expected = true;
          addr = memory_address_addr_space (mem_mode, addr, as);
          cse_not_expected = old_cse_not_expected;
          seq = get_insns ();
          end_sequence ();

          acost = seq_cost (seq, speed);
          acost += address_cost (addr, mem_mode, as, speed);

          if (!acost)
            acost = 1;
          data->costs[sym_p][var_p][off_p][rat_p] = acost;
        }

      /* On some targets, it is quite expensive to load symbol to a register,
         which makes addresses that contain symbols look much more expensive.
         However, the symbol will have to be loaded in any case before the
         loop (and quite likely we have it in register already), so it does not
         make much sense to penalize them too heavily.  So make some final
         tweaks for the SYMBOL_PRESENT modes:

         If VAR_PRESENT is false, and the mode obtained by changing symbol to
         var is cheaper, use this mode with small penalty.
         If VAR_PRESENT is true, try whether the mode with
         SYMBOL_PRESENT = false is cheaper even with cost of addition, and
         if this is the case, use it.  */
      add_c = add_cost (address_mode, speed);
      for (i = 0; i < 8; i++)
        {
          var_p = i & 1;
          off_p = (i >> 1) & 1;
          rat_p = (i >> 2) & 1;

          acost = data->costs[0][1][off_p][rat_p] + 1;
          if (var_p)
            acost += add_c;

          if (acost < data->costs[1][var_p][off_p][rat_p])
            data->costs[1][var_p][off_p][rat_p] = acost;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Address costs:\n");

          for (i = 0; i < 16; i++)
            {
              sym_p = i & 1;
              var_p = (i >> 1) & 1;
              off_p = (i >> 2) & 1;
              rat_p = (i >> 3) & 1;

              fprintf (dump_file, "  ");
              if (sym_p)
                fprintf (dump_file, "sym + ");
              if (var_p)
                fprintf (dump_file, "var + ");
              if (off_p)
                fprintf (dump_file, "cst + ");
              if (rat_p)
                fprintf (dump_file, "rat * ");

              acost = data->costs[sym_p][var_p][off_p][rat_p];
              fprintf (dump_file, "index costs %d\n", acost);
            }
          if (has_predec[mem_mode] || has_postdec[mem_mode]
              || has_preinc[mem_mode] || has_postinc[mem_mode])
            fprintf (dump_file, "  May include autoinc/dec\n");
          fprintf (dump_file, "\n");
        }

      VEC_replace (address_cost_data, address_cost_data_list,
                   data_index, data);
    }

  bits = GET_MODE_BITSIZE (address_mode);
  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
  offset &= mask;
  if ((offset >> (bits - 1) & 1))
    offset |= ~mask;
  s_offset = offset;

  autoinc = false;
  msize = GET_MODE_SIZE (mem_mode);
  autoinc_offset = offset;
  if (stmt_after_inc)
    autoinc_offset += ratio * cstep;
  if (symbol_present || var_present || ratio != 1)
    autoinc = false;
  else if ((has_postinc[mem_mode] && autoinc_offset == 0
            && msize == cstep)
           || (has_postdec[mem_mode] && autoinc_offset == 0
               && msize == -cstep)
           || (has_preinc[mem_mode] && autoinc_offset == msize
               && msize == cstep)
           || (has_predec[mem_mode] && autoinc_offset == -msize
               && msize == -cstep))
    autoinc = true;

  cost = 0;
  offset_p = (s_offset != 0
              && data->min_offset <= s_offset
              && s_offset <= data->max_offset);
  ratio_p = (ratio != 1
             && multiplier_allowed_in_address_p (ratio, mem_mode, as));

  if (ratio != 1 && !ratio_p)
    cost += multiply_by_cost (ratio, address_mode, speed);

  if (s_offset && !offset_p && !symbol_present)
    cost += add_cost (address_mode, speed);

  if (may_autoinc)
    *may_autoinc = autoinc;
  acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
  complexity = (symbol_present != 0) + (var_present != 0) + offset_p + ratio_p;
  return new_cost (cost + acost, complexity);
}
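
/* For illustration (not part of the pass): the sixteen entries of
   data->costs are indexed by four independent bits decoded from I as

     sym_p = i & 1;          // a symbol is present
     var_p = (i >> 1) & 1;   // a variable index is present
     off_p = (i >> 2) & 1;   // a constant offset is present
     rat_p = (i >> 3) & 1;   // the index is scaled

   so e.g. i == 13 (sym_p, off_p and rat_p set) prices an address of
   the shape symbol + offset + ratio * index.  */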
/* Calculate the SPEED or size cost of shiftadd EXPR in MODE.  MULT is
   the EXPR operand holding the shift.  COST0 and COST1 are the costs for
   calculating the operands of EXPR.  Returns true if successful, and returns
   the cost in COST.  */

static bool
get_shiftadd_cost (tree expr, enum machine_mode mode, comp_cost cost0,
                   comp_cost cost1, tree mult, bool speed, comp_cost *cost)
{
  comp_cost res;
  tree op1 = TREE_OPERAND (expr, 1);
  tree cst = TREE_OPERAND (mult, 1);
  tree multop = TREE_OPERAND (mult, 0);
  int m = exact_log2 (int_cst_value (cst));
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int sa_cost;

  if (!(m >= 0 && m < maxm))
    return false;

  sa_cost = (TREE_CODE (expr) != MINUS_EXPR
             ? shiftadd_cost[speed][mode][m]
             : (mult == op1
                ? shiftsub1_cost[speed][mode][m]
                : shiftsub0_cost[speed][mode][m]));
  res = new_cost (sa_cost, 0);
  res = add_costs (res, mult == op1 ? cost0 : cost1);

  STRIP_NOPS (multop);
  if (!is_gimple_val (multop))
    res = add_costs (res, force_expr_to_var_cost (multop, speed));

  *cost = res;
  return true;
}
/* Estimates cost of forcing expression EXPR into a variable.  */

static comp_cost
force_expr_to_var_cost (tree expr, bool speed)
{
  static bool costs_initialized = false;
  static unsigned integer_cost [2];
  static unsigned symbol_cost [2];
  static unsigned address_cost [2];
  tree op0, op1;
  comp_cost cost0, cost1, cost;
  enum machine_mode mode;

  if (!costs_initialized)
    {
      tree type = build_pointer_type (integer_type_node);
      tree var, addr;
      rtx x;
      int i;

      var = create_tmp_var_raw (integer_type_node, "test_var");
      TREE_STATIC (var) = 1;
      x = produce_memory_decl_rtl (var, NULL);
      SET_DECL_RTL (var, x);

      addr = build1 (ADDR_EXPR, type, var);

      for (i = 0; i < 2; i++)
        {
          integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
                                                             2000), i);

          symbol_cost[i] = computation_cost (addr, i) + 1;

          address_cost[i]
            = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
              fprintf (dump_file, "  integer %d\n", (int) integer_cost[i]);
              fprintf (dump_file, "  symbol %d\n", (int) symbol_cost[i]);
              fprintf (dump_file, "  address %d\n", (int) address_cost[i]);
              fprintf (dump_file, "  other %d\n", (int) target_spill_cost[i]);
              fprintf (dump_file, "\n");
            }
        }

      costs_initialized = true;
    }

  STRIP_NOPS (expr);

  if (SSA_VAR_P (expr))
    return zero_cost;

  if (is_gimple_min_invariant (expr))
    {
      if (TREE_CODE (expr) == INTEGER_CST)
        return new_cost (integer_cost [speed], 0);

      if (TREE_CODE (expr) == ADDR_EXPR)
        {
          tree obj = TREE_OPERAND (expr, 0);

          if (TREE_CODE (obj) == VAR_DECL
              || TREE_CODE (obj) == PARM_DECL
              || TREE_CODE (obj) == RESULT_DECL)
            return new_cost (symbol_cost [speed], 0);
        }

      return new_cost (address_cost [speed], 0);
    }

  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);
      STRIP_NOPS (op0);
      STRIP_NOPS (op1);

      if (is_gimple_val (op0))
        cost0 = zero_cost;
      else
        cost0 = force_expr_to_var_cost (op0, speed);

      if (is_gimple_val (op1))
        cost1 = zero_cost;
      else
        cost1 = force_expr_to_var_cost (op1, speed);

      break;

    case NEGATE_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      STRIP_NOPS (op0);
      op1 = NULL_TREE;

      if (is_gimple_val (op0))
        cost0 = zero_cost;
      else
        cost0 = force_expr_to_var_cost (op0, speed);

      cost1 = zero_cost;
      break;

    default:
      /* Just an arbitrary value, FIXME.  */
      return new_cost (target_spill_cost[speed], 0);
    }

  mode = TYPE_MODE (TREE_TYPE (expr));
  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      cost = new_cost (add_cost (mode, speed), 0);
      if (TREE_CODE (expr) != NEGATE_EXPR)
        {
          tree mult = NULL_TREE;
          comp_cost sa_cost;
          if (TREE_CODE (op1) == MULT_EXPR)
            mult = op1;
          else if (TREE_CODE (op0) == MULT_EXPR)
            mult = op0;

          if (mult != NULL_TREE
              && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
              && get_shiftadd_cost (expr, mode, cost0, cost1, mult, speed,
                                    &sa_cost))
            return sa_cost;
        }
      break;

    case MULT_EXPR:
      if (cst_and_fits_in_hwi (op0))
        cost = new_cost (multiply_by_cost (int_cst_value (op0), mode, speed), 0);
      else if (cst_and_fits_in_hwi (op1))
        cost = new_cost (multiply_by_cost (int_cst_value (op1), mode, speed), 0);
      else
        return new_cost (target_spill_cost [speed], 0);
      break;

    default:
      gcc_unreachable ();
    }

  cost = add_costs (cost, cost0);
  cost = add_costs (cost, cost1);

  /* Bound the cost by target_spill_cost.  The parts of complicated
     computations often are either loop invariant or at least can
     be shared between several iv uses, so letting this grow without
     limits would not give reasonable results.  */
  if (cost.cost > (int) target_spill_cost [speed])
    cost.cost = target_spill_cost [speed];

  return cost;
}
/* Estimates cost of forcing EXPR into a variable.  DEPENDS_ON is a set of the
   invariants the computation depends on.  */

static comp_cost
force_var_cost (struct ivopts_data *data,
                tree expr, bitmap *depends_on)
{
  if (depends_on)
    {
      fd_ivopts_data = data;
      walk_tree (&expr, find_depends, depends_on, NULL);
    }

  return force_expr_to_var_cost (expr, data->speed);
}
/* Estimates cost of expressing address ADDR as var + symbol + offset.  The
   value of offset is added to OFFSET, SYMBOL_PRESENT and VAR_PRESENT are set
   to false if the corresponding part is missing.  DEPENDS_ON is a set of the
   invariants the computation depends on.  */

static comp_cost
split_address_cost (struct ivopts_data *data,
                    tree addr, bool *symbol_present, bool *var_present,
                    unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  tree core;
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  tree toffset;
  enum machine_mode mode;
  int unsignedp, volatilep;

  core = get_inner_reference (addr, &bitsize, &bitpos, &toffset, &mode,
                              &unsignedp, &volatilep, false);

  if (toffset != 0
      || bitpos % BITS_PER_UNIT != 0
      || TREE_CODE (core) != VAR_DECL)
    {
      *symbol_present = false;
      *var_present = true;
      fd_ivopts_data = data;
      walk_tree (&addr, find_depends, depends_on, NULL);
      return new_cost (target_spill_cost[data->speed], 0);
    }

  *offset += bitpos / BITS_PER_UNIT;
  if (TREE_STATIC (core)
      || DECL_EXTERNAL (core))
    {
      *symbol_present = true;
      *var_present = false;
      return zero_cost;
    }

  *symbol_present = false;
  *var_present = true;
  return zero_cost;
}
/* Estimates cost of expressing difference of addresses E1 - E2 as
   var + symbol + offset.  The value of offset is added to OFFSET,
   SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
   part is missing.  DEPENDS_ON is a set of the invariants the computation
   depends on.  */

static comp_cost
ptr_difference_cost (struct ivopts_data *data,
                     tree e1, tree e2, bool *symbol_present, bool *var_present,
                     unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  HOST_WIDE_INT diff = 0;
  aff_tree aff_e1, aff_e2;
  tree type;

  gcc_assert (TREE_CODE (e1) == ADDR_EXPR);

  if (ptr_difference_const (e1, e2, &diff))
    {
      *offset += diff;
      *symbol_present = false;
      *var_present = false;
      return zero_cost;
    }

  if (integer_zerop (e2))
    return split_address_cost (data, TREE_OPERAND (e1, 0),
                               symbol_present, var_present, offset, depends_on);

  *symbol_present = false;
  *var_present = true;

  type = signed_type_for (TREE_TYPE (e1));
  tree_to_aff_combination (e1, type, &aff_e1);
  tree_to_aff_combination (e2, type, &aff_e2);
  aff_combination_scale (&aff_e2, double_int_minus_one);
  aff_combination_add (&aff_e1, &aff_e2);

  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
/* Estimates cost of expressing difference E1 - E2 as
   var + symbol + offset.  The value of offset is added to OFFSET,
   SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
   part is missing.  DEPENDS_ON is a set of the invariants the computation
   depends on.  */

static comp_cost
difference_cost (struct ivopts_data *data,
                 tree e1, tree e2, bool *symbol_present, bool *var_present,
                 unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
{
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (e1));
  unsigned HOST_WIDE_INT off1, off2;
  aff_tree aff_e1, aff_e2;
  tree type;

  e1 = strip_offset (e1, &off1);
  e2 = strip_offset (e2, &off2);
  *offset += off1 - off2;

  STRIP_NOPS (e1);
  STRIP_NOPS (e2);

  if (TREE_CODE (e1) == ADDR_EXPR)
    return ptr_difference_cost (data, e1, e2, symbol_present, var_present,
                                offset, depends_on);
  *symbol_present = false;

  if (operand_equal_p (e1, e2, 0))
    {
      *var_present = false;
      return zero_cost;
    }

  *var_present = true;

  if (integer_zerop (e2))
    return force_var_cost (data, e1, depends_on);

  if (integer_zerop (e1))
    {
      comp_cost cost = force_var_cost (data, e2, depends_on);
      cost.cost += multiply_by_cost (-1, mode, data->speed);
      return cost;
    }

  type = signed_type_for (TREE_TYPE (e1));
  tree_to_aff_combination (e1, type, &aff_e1);
  tree_to_aff_combination (e2, type, &aff_e2);
  aff_combination_scale (&aff_e2, double_int_minus_one);
  aff_combination_add (&aff_e1, &aff_e2);

  return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
}
/* Returns true if AFF1 and AFF2 are identical.  */

static bool
compare_aff_trees (aff_tree *aff1, aff_tree *aff2)
{
  unsigned i;

  if (aff1->n != aff2->n)
    return false;

  for (i = 0; i < aff1->n; i++)
    {
      if (double_int_cmp (aff1->elts[i].coef, aff2->elts[i].coef, 0) != 0)
        return false;

      if (!operand_equal_p (aff1->elts[i].val, aff2->elts[i].val, 0))
        return false;
    }
  return true;
}
/* Stores EXPR in DATA->inv_expr_tab, and assigns it an inv_expr_id.  */

static int
get_expr_id (struct ivopts_data *data, tree expr)
{
  struct iv_inv_expr_ent ent;
  struct iv_inv_expr_ent **slot;

  ent.expr = expr;
  ent.hash = iterative_hash_expr (expr, 0);
  slot = (struct iv_inv_expr_ent **) htab_find_slot (data->inv_expr_tab,
                                                     &ent, INSERT);
  if (*slot)
    return (*slot)->id;

  *slot = XNEW (struct iv_inv_expr_ent);
  (*slot)->expr = expr;
  (*slot)->hash = ent.hash;
  (*slot)->id = data->inv_expr_id++;
  return (*slot)->id;
}
/* Returns the pseudo expr id if expression UBASE - RATIO * CBASE
   requires a new compiler generated temporary.  Returns -1 otherwise.
   ADDRESS_P is a flag indicating if the expression is for address
   computation.  */

static int
get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase,
                            tree cbase, HOST_WIDE_INT ratio,
                            bool address_p)
{
  aff_tree ubase_aff, cbase_aff;
  tree expr, ub, cb;

  STRIP_NOPS (ubase);
  STRIP_NOPS (cbase);
  ub = ubase;
  cb = cbase;

  if ((TREE_CODE (ubase) == INTEGER_CST)
      && (TREE_CODE (cbase) == INTEGER_CST))
    return -1;

  /* Strips the constant part.  */
  if (TREE_CODE (ubase) == PLUS_EXPR
      || TREE_CODE (ubase) == MINUS_EXPR
      || TREE_CODE (ubase) == POINTER_PLUS_EXPR)
    {
      if (TREE_CODE (TREE_OPERAND (ubase, 1)) == INTEGER_CST)
        ubase = TREE_OPERAND (ubase, 0);
    }

  /* Strips the constant part.  */
  if (TREE_CODE (cbase) == PLUS_EXPR
      || TREE_CODE (cbase) == MINUS_EXPR
      || TREE_CODE (cbase) == POINTER_PLUS_EXPR)
    {
      if (TREE_CODE (TREE_OPERAND (cbase, 1)) == INTEGER_CST)
        cbase = TREE_OPERAND (cbase, 0);
    }

  if (address_p)
    {
      if (((TREE_CODE (ubase) == SSA_NAME)
           || (TREE_CODE (ubase) == ADDR_EXPR
               && is_gimple_min_invariant (ubase)))
          && (TREE_CODE (cbase) == INTEGER_CST))
        return -1;

      if (((TREE_CODE (cbase) == SSA_NAME)
           || (TREE_CODE (cbase) == ADDR_EXPR
               && is_gimple_min_invariant (cbase)))
          && (TREE_CODE (ubase) == INTEGER_CST))
        return -1;
    }

  if (ratio == 1)
    {
      if (operand_equal_p (ubase, cbase, 0))
        return -1;

      if (TREE_CODE (ubase) == ADDR_EXPR
          && TREE_CODE (cbase) == ADDR_EXPR)
        {
          tree usym, csym;

          usym = TREE_OPERAND (ubase, 0);
          csym = TREE_OPERAND (cbase, 0);
          if (TREE_CODE (usym) == ARRAY_REF)
            {
              tree ind = TREE_OPERAND (usym, 1);
              if (TREE_CODE (ind) == INTEGER_CST
                  && host_integerp (ind, 0)
                  && TREE_INT_CST_LOW (ind) == 0)
                usym = TREE_OPERAND (usym, 0);
            }
          if (TREE_CODE (csym) == ARRAY_REF)
            {
              tree ind = TREE_OPERAND (csym, 1);
              if (TREE_CODE (ind) == INTEGER_CST
                  && host_integerp (ind, 0)
                  && TREE_INT_CST_LOW (ind) == 0)
                csym = TREE_OPERAND (csym, 0);
            }
          if (operand_equal_p (usym, csym, 0))
            return -1;
        }
      /* Now do more complex comparison.  */
      tree_to_aff_combination (ubase, TREE_TYPE (ubase), &ubase_aff);
      tree_to_aff_combination (cbase, TREE_TYPE (cbase), &cbase_aff);
      if (compare_aff_trees (&ubase_aff, &cbase_aff))
        return -1;
    }

  tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff);
  tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff);

  aff_combination_scale (&cbase_aff, shwi_to_double_int (-1 * ratio));
  aff_combination_add (&ubase_aff, &cbase_aff);
  expr = aff_combination_to_tree (&ubase_aff);
  return get_expr_id (data, expr);
}
/* Determines the cost of the computation by which USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into
   register.  A set of invariants we depend on is stored in
   DEPENDS_ON.  AT is the statement at which the value is computed.
   If CAN_AUTOINC is nonnull, use it to record whether autoinc
   addressing is likely.  */

static comp_cost
get_computation_cost_at (struct ivopts_data *data,
                         struct iv_use *use, struct iv_cand *cand,
                         bool address_p, bitmap *depends_on, gimple at,
                         bool *can_autoinc,
                         int *inv_expr_id)
{
  tree ubase = use->iv->base, ustep = use->iv->step;
  tree cbase, cstep;
  tree utype = TREE_TYPE (ubase), ctype;
  unsigned HOST_WIDE_INT cstepi, offset = 0;
  HOST_WIDE_INT ratio, aratio;
  bool var_present, symbol_present, stmt_is_after_inc;
  comp_cost cost;
  double_int rat;
  bool speed = optimize_bb_for_speed_p (gimple_bb (at));

  *depends_on = NULL;

  /* Only consider real candidates.  */
  if (!cand->iv)
    return infinite_cost;

  cbase = cand->iv->base;
  cstep = cand->iv->step;
  ctype = TREE_TYPE (cbase);

  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    {
      /* We do not have a precision to express the values of use.  */
      return infinite_cost;
    }

  /* Do not try to express address of an object with computation based
     on address of a different object.  This may cause problems in rtl
     level alias analysis (that does not expect this to be happening,
     as this is illegal in C), and would be unlikely to be useful
     anyway.  */
  if (use->iv->base_object
      && cand->iv->base_object
      && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
    return infinite_cost;

  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      /* TODO -- add direct handling of this case.  */
      goto fallback;
    }

  /* CSTEPI is removed from the offset in case statement is after the
     increment.  If the step is not constant, we use zero instead.
     This is a bit imprecise (there is the extra addition), but
     redundancy elimination is likely to transform the code so that
     it uses value of the variable before increment anyway,
     so it is not that much unrealistic.  */
  if (cst_and_fits_in_hwi (cstep))
    cstepi = int_cst_value (cstep);
  else
    cstepi = 0;

  if (!constant_multiple_of (ustep, cstep, &rat))
    return infinite_cost;

  if (double_int_fits_in_shwi_p (rat))
    ratio = double_int_to_shwi (rat);
  else
    return infinite_cost;

  STRIP_NOPS (cbase);
  ctype = TREE_TYPE (cbase);

  stmt_is_after_inc = stmt_after_increment (data->current_loop, cand, at);

  /* use = ubase + ratio * (var - cbase).  If either cbase is a constant
     or ratio == 1, it is better to handle this like

     ubase - ratio * cbase + ratio * var

     (also holds in the case ratio == -1, TODO).  */

  if (cst_and_fits_in_hwi (cbase))
    {
      offset = - ratio * int_cst_value (cbase);
      cost = difference_cost (data,
                              ubase, build_int_cst (utype, 0),
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else if (ratio == 1)
    {
      tree real_cbase = cbase;

      /* Check to see if any adjustment is needed.  */
      if (cstepi == 0 && stmt_is_after_inc)
        {
          aff_tree real_cbase_aff;
          aff_tree cstep_aff;

          tree_to_aff_combination (cbase, TREE_TYPE (real_cbase),
                                   &real_cbase_aff);
          tree_to_aff_combination (cstep, TREE_TYPE (cstep), &cstep_aff);

          aff_combination_add (&real_cbase_aff, &cstep_aff);
          real_cbase = aff_combination_to_tree (&real_cbase_aff);
        }

      cost = difference_cost (data,
                              ubase, real_cbase,
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else if (address_p
           && !POINTER_TYPE_P (ctype)
           && multiplier_allowed_in_address_p
                (ratio, TYPE_MODE (TREE_TYPE (utype)),
                 TYPE_ADDR_SPACE (TREE_TYPE (utype))))
    {
      cbase
        = fold_build2 (MULT_EXPR, ctype, cbase, build_int_cst (ctype, ratio));
      cost = difference_cost (data,
                              ubase, cbase,
                              &symbol_present, &var_present, &offset,
                              depends_on);
      cost.cost /= avg_loop_niter (data->current_loop);
    }
  else
    {
      cost = force_var_cost (data, cbase, depends_on);
      cost = add_costs (cost,
                        difference_cost (data,
                                         ubase, build_int_cst (utype, 0),
                                         &symbol_present, &var_present,
                                         &offset, depends_on));
      cost.cost /= avg_loop_niter (data->current_loop);
      cost.cost += add_cost (TYPE_MODE (ctype), data->speed);
    }

  if (inv_expr_id)
    {
      *inv_expr_id =
          get_loop_invariant_expr_id (data, ubase, cbase, ratio, address_p);
      /* Clear depends on.  */
      if (*inv_expr_id != -1 && depends_on && *depends_on)
        bitmap_clear (*depends_on);
    }

  /* If we are after the increment, the value of the candidate is higher by
     one iteration.  */
  if (stmt_is_after_inc)
    offset -= ratio * cstepi;

  /* Now the computation is in shape symbol + var1 + const + ratio * var2.
     (symbol/var1/const parts may be omitted).  If we are looking for an
     address, find the cost of addressing this.  */
  if (address_p)
    return add_costs (cost,
                      get_address_cost (symbol_present, var_present,
                                        offset, ratio, cstepi,
                                        TYPE_MODE (TREE_TYPE (utype)),
                                        TYPE_ADDR_SPACE (TREE_TYPE (utype)),
                                        speed, stmt_is_after_inc,
                                        can_autoinc));

  /* Otherwise estimate the costs for computing the expression.  */
  if (!symbol_present && !var_present && !offset)
    {
      if (ratio != 1)
        cost.cost += multiply_by_cost (ratio, TYPE_MODE (ctype), speed);
      return cost;
    }

  /* Symbol + offset should be compile-time computable so consider that they
     are added once to the variable, if present.  */
  if (var_present && (symbol_present || offset))
    cost.cost += adjust_setup_cost (data,
                                    add_cost (TYPE_MODE (ctype), speed));

  /* Having offset does not affect runtime cost in case it is added to
     symbol, but it increases complexity.  */
  if (offset)
    cost.complexity++;

  cost.cost += add_cost (TYPE_MODE (ctype), speed);

  aratio = ratio > 0 ? ratio : -ratio;
  if (aratio != 1)
    cost.cost += multiply_by_cost (aratio, TYPE_MODE (ctype), speed);
  return cost;

fallback:
  if (can_autoinc)
    *can_autoinc = false;

  {
    /* Just get the expression, expand it and measure the cost.  */
    tree comp = get_computation_at (data->current_loop, use, cand, at);

    if (!comp)
      return infinite_cost;

    if (address_p)
      comp = build_simple_mem_ref (comp);

    return new_cost (computation_cost (comp, speed), 0);
  }
}
/* Determines the cost of the computation by which USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into
   register.  A set of invariants we depend on is stored in
   DEPENDS_ON.  If CAN_AUTOINC is nonnull, use it to record whether
   autoinc addressing is likely.  */

static comp_cost
get_computation_cost (struct ivopts_data *data,
                      struct iv_use *use, struct iv_cand *cand,
                      bool address_p, bitmap *depends_on,
                      bool *can_autoinc, int *inv_expr_id)
{
  return get_computation_cost_at (data,
                                  use, cand, address_p, depends_on, use->stmt,
                                  can_autoinc, inv_expr_id);
}
/* Determines cost of basing replacement of USE on CAND in a generic
   expression.  */

static bool
determine_use_iv_cost_generic (struct ivopts_data *data,
                               struct iv_use *use, struct iv_cand *cand)
{
  bitmap depends_on;
  comp_cost cost;
  int inv_expr_id = -1;

  /* The simple case first -- if we need to express value of the preserved
     original biv, the cost is 0.  This also prevents us from counting the
     cost of increment twice -- once at this use and once in the cost of
     the candidate.  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      set_use_iv_cost (data, use, cand, zero_cost, NULL, NULL_TREE,
                       ERROR_MARK, -1);
      return true;
    }

  cost = get_computation_cost (data, use, cand, false, &depends_on,
                               NULL, &inv_expr_id);

  set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE, ERROR_MARK,
                   inv_expr_id);

  return !infinite_cost_p (cost);
}
/* Determines cost of basing replacement of USE on CAND in an address.  */

static bool
determine_use_iv_cost_address (struct ivopts_data *data,
                               struct iv_use *use, struct iv_cand *cand)
{
  bitmap depends_on;
  bool can_autoinc;
  int inv_expr_id = -1;
  comp_cost cost = get_computation_cost (data, use, cand, true, &depends_on,
                                         &can_autoinc, &inv_expr_id);

  if (cand->ainc_use == use)
    {
      if (can_autoinc)
        cost.cost -= cand->cost_step;
      /* If we generated the candidate solely for exploiting autoincrement
         opportunities, and it turns out it can't be used, set the cost to
         infinity to make sure we ignore it.  */
      else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
        cost = infinite_cost;
    }
  set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE, ERROR_MARK,
                   inv_expr_id);

  return !infinite_cost_p (cost);
}
/* Computes value of candidate CAND at position AT in iteration NITER, and
   stores it to VAL.  */

static void
cand_value_at (struct loop *loop, struct iv_cand *cand, gimple at, tree niter,
               aff_tree *val)
{
  aff_tree step, delta, nit;
  struct iv *iv = cand->iv;
  tree type = TREE_TYPE (iv->base);
  tree steptype = type;
  if (POINTER_TYPE_P (type))
    steptype = sizetype;

  tree_to_aff_combination (iv->step, steptype, &step);
  tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
  aff_combination_convert (&nit, steptype);
  aff_combination_mult (&nit, &step, &delta);
  if (stmt_after_increment (loop, cand, at))
    aff_combination_add (&delta, &step);

  tree_to_aff_combination (iv->base, type, val);
  aff_combination_add (val, &delta);
}
/* Returns period of induction variable iv.  */

static tree
iv_period (struct iv *iv)
{
  tree step = iv->step, period, type;
  tree pow2div;

  gcc_assert (step && TREE_CODE (step) == INTEGER_CST);

  type = unsigned_type_for (TREE_TYPE (step));
  /* Period of the iv is lcm (step, type_range) / step - 1,
     i.e., N * type_range / step - 1.  Since type range is a power
     of two, N == step >> num_of_ending_zeros_binary (step),
     so the final result is

       (type_range >> num_of_ending_zeros_binary (step)) - 1.  */
  pow2div = num_ending_zeros (step);

  period = build_low_bits_mask (type,
                                (TYPE_PRECISION (type)
                                 - tree_low_cst (pow2div, 1)));

  return period;
}
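
/* For illustration (not part of the pass): for a 32-bit unsigned iv
   with step 4, the iv values repeat after 2^32 / 4 increments, and the
   function returns

     (0xffffffff >> 2) == 0x3fffffff

   i.e. the number of increments that are guaranteed not to wrap back
   to the initial value.  */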
/* Returns the comparison operator used when eliminating the iv USE.  */

static enum tree_code
iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
{
  struct loop *loop = data->current_loop;
  basic_block ex_bb;
  edge exit;

  ex_bb = gimple_bb (use->stmt);
  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);

  return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
}
/* Strips type conversions from EXP that neither add nor remove the
   guarantee that the value does not wrap.  */

static tree
strip_wrap_conserving_type_conversions (tree exp)
{
  while (tree_ssa_useless_type_conversion (exp)
         && (nowrap_type_p (TREE_TYPE (exp))
             == nowrap_type_p (TREE_TYPE (TREE_OPERAND (exp, 0)))))
    exp = TREE_OPERAND (exp, 0);

  return exp;
}
/* Walk the SSA form and check whether E == WHAT.  Fairly simplistic, we
   check for an exact match.  */

static bool
expr_equal_p (tree e, tree what)
{
  gimple stmt;
  enum tree_code code;

  e = strip_wrap_conserving_type_conversions (e);
  what = strip_wrap_conserving_type_conversions (what);

  code = TREE_CODE (what);
  if (TREE_TYPE (e) != TREE_TYPE (what))
    return false;

  if (operand_equal_p (e, what, 0))
    return true;

  if (TREE_CODE (e) != SSA_NAME)
    return false;

  stmt = SSA_NAME_DEF_STMT (e);
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_code (stmt) != code)
    return false;

  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_BINARY_RHS:
      if (!expr_equal_p (gimple_assign_rhs2 (stmt), TREE_OPERAND (what, 1)))
        return false;

      /* Fallthru.  */
    case GIMPLE_UNARY_RHS:
    case GIMPLE_SINGLE_RHS:
      return expr_equal_p (gimple_assign_rhs1 (stmt), TREE_OPERAND (what, 0));
    default:
      return false;
    }
}
/* Returns true if we can prove that BASE - OFFSET does not overflow.  For now,
   we only detect the situation that BASE = SOMETHING + OFFSET, where the
   calculation is performed in non-wrapping type.

   TODO: More generally, we could test for the situation that
         BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
         This would require knowing the sign of OFFSET.

         Also, we only look for the first addition in the computation of BASE.
         More complex analysis would be better, but introducing it just for
         this optimization seems like an overkill.  */

static bool
difference_cannot_overflow_p (tree base, tree offset)
{
  enum tree_code code;
  tree e1, e2;

  if (!nowrap_type_p (TREE_TYPE (base)))
    return false;

  base = expand_simple_operations (base);

  if (TREE_CODE (base) == SSA_NAME)
    {
      gimple stmt = SSA_NAME_DEF_STMT (base);

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
        return false;

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
        return false;

      e1 = gimple_assign_rhs1 (stmt);
      e2 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      code = TREE_CODE (base);
      if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
        return false;
      e1 = TREE_OPERAND (base, 0);
      e2 = TREE_OPERAND (base, 1);
    }

  /* TODO: deeper inspection may be necessary to prove the equality.  */
  switch (code)
    {
    case PLUS_EXPR:
      return expr_equal_p (e1, offset) || expr_equal_p (e2, offset);
    case POINTER_PLUS_EXPR:
      return expr_equal_p (e2, offset);

    default:
      return false;
    }
}
/* Tries to replace loop exit by one formulated in terms of a LT_EXPR
   comparison with CAND.  NITER describes the number of iterations of
   the loops.  If successful, the comparison in COMP_P is altered accordingly.

   We aim to handle the following situation:

   sometype *base, *p;
   int a, b, i;

   i = a;
   p = p_0 = base + a;

   do
     {
       bar (*p);
       p++;
       i++;
     }
   while (i < b);

   Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
   We aim to optimize this to

   p = p_0 = base + a;
   do
     {
       bar (*p);
       p++;
     }
   while (p < p_0 - a + b);

   This preserves the correctness, since the pointer arithmetics does not
   overflow.  More precisely:

   1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there is
      no overflow in computing it or the values of p.
   2) if a + 1 > b, then we need to verify that the expression p_0 - a does not
      overflow.  To prove this, we use the fact that p_0 = base + a.  */

static bool
iv_elimination_compare_lt (struct ivopts_data *data,
                           struct iv_cand *cand, enum tree_code *comp_p,
                           struct tree_niter_desc *niter)
{
  tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
  struct affine_tree_combination nit, tmpa, tmpb;
  enum tree_code comp;
  HOST_WIDE_INT step;

  /* We need to know that the candidate induction variable does not overflow.
     While more complex analysis may be used to prove this, for now just
     check that the variable appears in the original program and that it
     is computed in a type that guarantees no overflows.  */
  cand_type = TREE_TYPE (cand->iv->base);
  if (cand->pos != IP_ORIGINAL || !nowrap_type_p (cand_type))
    return false;

  /* Make sure that the loop iterates till the loop bound is hit, as otherwise
     the calculation of the BOUND could overflow, making the comparison
     invalid.  */
  if (!data->loop_single_exit_p)
    return false;

  /* We need to be able to decide whether candidate is increasing or decreasing
     in order to choose the right comparison operator.  */
  if (!cst_and_fits_in_hwi (cand->iv->step))
    return false;
  step = int_cst_value (cand->iv->step);

  /* Check that the number of iterations matches the expected pattern:
     a + 1 > b ? 0 : b - a - 1.  */
  mbz = niter->may_be_zero;
  if (TREE_CODE (mbz) == GT_EXPR)
    {
      /* Handle a + 1 > b.  */
      tree op0 = TREE_OPERAND (mbz, 0);
      if (TREE_CODE (op0) == PLUS_EXPR && integer_onep (TREE_OPERAND (op0, 1)))
        {
          a = TREE_OPERAND (op0, 0);
          b = TREE_OPERAND (mbz, 1);
        }
      else
        return false;
    }
  else if (TREE_CODE (mbz) == LT_EXPR)
    {
      tree op1 = TREE_OPERAND (mbz, 1);

      /* Handle b < a + 1.  */
      if (TREE_CODE (op1) == PLUS_EXPR && integer_onep (TREE_OPERAND (op1, 1)))
        {
          a = TREE_OPERAND (op1, 0);
          b = TREE_OPERAND (mbz, 0);
        }
      else
        return false;
    }
  else
    return false;

  /* Expected number of iterations is B - A - 1.  Check that it matches
     the actual number, i.e., that B - A - NITER = 1.  */
  tree_to_aff_combination (niter->niter, nit_type, &nit);
  tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
  tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
  aff_combination_scale (&nit, double_int_minus_one);
  aff_combination_scale (&tmpa, double_int_minus_one);
  aff_combination_add (&tmpb, &tmpa);
  aff_combination_add (&tmpb, &nit);
  if (tmpb.n != 0 || !double_int_equal_p (tmpb.offset, double_int_one))
    return false;

  /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
     overflow.  */
  offset = fold_build2 (MULT_EXPR, TREE_TYPE (cand->iv->step),
                        cand->iv->step,
                        fold_convert (TREE_TYPE (cand->iv->step), a));
  if (!difference_cannot_overflow_p (cand->iv->base, offset))
    return false;

  /* Determine the new comparison operator.  */
  comp = step < 0 ? GT_EXPR : LT_EXPR;
  if (*comp_p == NE_EXPR)
    *comp_p = comp;
  else if (*comp_p == EQ_EXPR)
    *comp_p = invert_tree_comparison (comp, false);
  else
    gcc_unreachable ();

  return true;
}
/* Check whether it is possible to express the condition in USE by comparison
   of candidate CAND.  If so, store the value compared with to BOUND, and the
   comparison operator to COMP.  */

static bool
may_eliminate_iv (struct ivopts_data *data,
                  struct iv_use *use, struct iv_cand *cand, tree *bound,
                  enum tree_code *comp)
{
  basic_block ex_bb;
  edge exit;
  tree period;
  struct loop *loop = data->current_loop;
  aff_tree bnd;
  struct tree_niter_desc *desc = NULL;

  if (TREE_CODE (cand->iv->step) != INTEGER_CST)
    return false;

  /* For now works only for exits that dominate the loop latch.
     TODO: extend to other conditions inside loop body.  */
  ex_bb = gimple_bb (use->stmt);
  if (use->stmt != last_stmt (ex_bb)
      || gimple_code (use->stmt) != GIMPLE_COND
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
    return false;

  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    return false;

  desc = niter_for_exit (data, exit);
  if (!desc)
    return false;

  /* Determine whether we can use the variable to test the exit condition.
     This is the case iff the period of the induction variable is greater
     than the number of iterations for which the exit condition is true.  */
  period = iv_period (cand->iv);

  /* If the number of iterations is constant, compare against it directly.  */
  if (TREE_CODE (desc->niter) == INTEGER_CST)
    {
      /* See cand_value_at.  */
      if (stmt_after_increment (loop, cand, use->stmt))
        {
          if (!tree_int_cst_lt (desc->niter, period))
            return false;
        }
      else
        {
          if (tree_int_cst_lt (period, desc->niter))
            return false;
        }
    }

  /* If not, and if this is the only possible exit of the loop, see whether
     we can get a conservative estimate on the number of iterations of the
     entire loop and compare against that instead.  */
  else
    {
      double_int period_value, max_niter;

      max_niter = desc->max;
      if (stmt_after_increment (loop, cand, use->stmt))
        max_niter = double_int_add (max_niter, double_int_one);
      period_value = tree_to_double_int (period);
      if (double_int_ucmp (max_niter, period_value) > 0)
        {
          /* See if we can take advantage of inferred loop bound
             information.  */
          if (data->loop_single_exit_p)
            {
              if (!estimated_loop_iterations (loop, true, &max_niter))
                return false;
              /* The loop bound is already adjusted by adding 1.  */
              if (double_int_ucmp (max_niter, period_value) > 0)
                return false;
            }
          else
            return false;
        }
    }

  cand_value_at (loop, cand, use->stmt, desc->niter, &bnd);

  *bound = aff_combination_to_tree (&bnd);
  *comp = iv_elimination_compare (data, use);

  /* It is unlikely that computing the number of iterations using division
     would be more profitable than keeping the original induction variable.  */
  if (expression_expensive_p (*bound))
    return false;

  /* Sometimes it is possible to handle the situation that the number of
     iterations may be zero unless additional assumptions hold, by using <
     instead of != in the exit condition.

     TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
           base the exit condition on it.  However, that is often too
           expensive.  */
  if (!integer_zerop (desc->may_be_zero))
    return iv_elimination_compare_lt (data, cand, comp, desc);

  return true;
}
/* Calculates the cost of BOUND, if it is a PARM_DECL.  A PARM_DECL must
   be copied, if it is used in the loop body and DATA->body_includes_call.  */

static int
parm_decl_cost (struct ivopts_data *data, tree bound)
{
  tree sbound = bound;
  STRIP_NOPS (sbound);

  if (TREE_CODE (sbound) == SSA_NAME
      && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
      && gimple_nop_p (SSA_NAME_DEF_STMT (sbound))
      && data->body_includes_call)
    return COSTS_N_INSNS (1);

  return 0;
}
/* Determines cost of basing replacement of USE on CAND in a condition.  */

static bool
determine_use_iv_cost_condition (struct ivopts_data *data,
                                 struct iv_use *use, struct iv_cand *cand)
{
  tree bound = NULL_TREE;
  struct iv *cmp_iv;
  bitmap depends_on_elim = NULL, depends_on_express = NULL, depends_on;
  comp_cost elim_cost, express_cost, cost, bound_cost;
  bool ok;
  int elim_inv_expr_id = -1, express_inv_expr_id = -1, inv_expr_id;
  tree *control_var, *bound_cst;
  enum tree_code comp = ERROR_MARK;

  /* Only consider real candidates.  */
  if (!cand->iv)
    {
      set_use_iv_cost (data, use, cand, infinite_cost, NULL, NULL_TREE,
                       ERROR_MARK, -1);
      return false;
    }

  /* Try iv elimination.  */
  if (may_eliminate_iv (data, use, cand, &bound, &comp))
    {
      elim_cost = force_var_cost (data, bound, &depends_on_elim);
      if (elim_cost.cost == 0)
        elim_cost.cost = parm_decl_cost (data, bound);
      else if (TREE_CODE (bound) == INTEGER_CST)
        elim_cost.cost = 0;
      /* If we replace a loop condition 'i < n' with 'p < base + n',
         depends_on_elim will have 'base' and 'n' set, which implies
         that both 'base' and 'n' will be live during the loop.  More likely,
         'base + n' will be loop invariant, resulting in only one live value
         during the loop.  So in that case we clear depends_on_elim and set
         elim_inv_expr_id instead.  */
      if (depends_on_elim && bitmap_count_bits (depends_on_elim) > 1)
        {
          elim_inv_expr_id = get_expr_id (data, bound);
          bitmap_clear (depends_on_elim);
        }
      /* The bound is a loop invariant, so it will only be computed
         once.  */
      elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
    }
  else
    elim_cost = infinite_cost;

  /* Try expressing the original giv.  If it is compared with an invariant,
     note that we cannot get rid of it.  */
  ok = extract_cond_operands (data, use->stmt, &control_var, &bound_cst,
                              NULL, &cmp_iv);
  gcc_assert (ok);

  /* When the condition is a comparison of the candidate IV against
     zero, prefer this IV.

     TODO: The constant that we're subtracting from the cost should
     be target-dependent.  This information should be added to the
     target costs for each backend.  */
  if (!infinite_cost_p (elim_cost) /* Do not try to decrease infinite! */
      && integer_zerop (*bound_cst)
      && (operand_equal_p (*control_var, cand->var_after, 0)
          || operand_equal_p (*control_var, cand->var_before, 0)))
    elim_cost.cost -= 1;

  express_cost = get_computation_cost (data, use, cand, false,
                                       &depends_on_express, NULL,
                                       &express_inv_expr_id);
  fd_ivopts_data = data;
  walk_tree (&cmp_iv->base, find_depends, &depends_on_express, NULL);

  /* Count the cost of the original bound as well.  */
  bound_cost = force_var_cost (data, *bound_cst, NULL);
  if (bound_cost.cost == 0)
    bound_cost.cost = parm_decl_cost (data, *bound_cst);
  else if (TREE_CODE (*bound_cst) == INTEGER_CST)
    bound_cost.cost = 0;
  express_cost.cost += bound_cost.cost;

  /* Choose the better approach, preferring the eliminated IV.  */
  if (compare_costs (elim_cost, express_cost) <= 0)
    {
      cost = elim_cost;
      depends_on = depends_on_elim;
      depends_on_elim = NULL;
      inv_expr_id = elim_inv_expr_id;
    }
  else
    {
      cost = express_cost;
      depends_on = depends_on_express;
      depends_on_express = NULL;
      bound = NULL_TREE;
      comp = ERROR_MARK;
      inv_expr_id = express_inv_expr_id;
    }

  set_use_iv_cost (data, use, cand, cost, depends_on, bound, comp, inv_expr_id);

  if (depends_on_elim)
    BITMAP_FREE (depends_on_elim);
  if (depends_on_express)
    BITMAP_FREE (depends_on_express);

  return !infinite_cost_p (cost);
}
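/* A hypothetical example of the choice made above: for

     for (i = 0; i < n; i++)
       a[i] = ...;

   with a pointer candidate p iterating over a[], elimination rewrites the
   exit test to something like p < base + n (ELIM_COST), whereas expressing
   the original biv keeps i alive solely for the comparison (EXPRESS_COST).
   The cheaper variant wins, with ties broken in favor of elimination.  */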
/* Determines cost of basing replacement of USE on CAND.  Returns false
   if USE cannot be based on CAND.  */

static bool
determine_use_iv_cost (struct ivopts_data *data,
                       struct iv_use *use, struct iv_cand *cand)
{
  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      return determine_use_iv_cost_generic (data, use, cand);

    case USE_ADDRESS:
      return determine_use_iv_cost_address (data, use, cand);

    case USE_COMPARE:
      return determine_use_iv_cost_condition (data, use, cand);

    default:
      gcc_unreachable ();
    }
}
/* Return true if get_computation_cost indicates that autoincrement is
   a possibility for the pair of USE and CAND, false otherwise.  */

static bool
autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
                           struct iv_cand *cand)
{
  bitmap depends_on;
  bool can_autoinc;
  comp_cost cost;

  if (use->type != USE_ADDRESS)
    return false;

  cost = get_computation_cost (data, use, cand, true, &depends_on,
                               &can_autoinc, NULL);

  BITMAP_FREE (depends_on);

  return !infinite_cost_p (cost) && can_autoinc;
}
/* Examine IP_ORIGINAL candidates to see if they are incremented next to a
   use that allows autoincrement, and set their AINC_USE if possible.  */

static void
set_autoinc_for_original_candidates (struct ivopts_data *data)
{
  unsigned i, j;

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);
      struct iv_use *closest = NULL;
      if (cand->pos != IP_ORIGINAL)
        continue;
      for (j = 0; j < n_iv_uses (data); j++)
        {
          struct iv_use *use = iv_use (data, j);
          unsigned uid = gimple_uid (use->stmt);
          if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at)
              || uid > gimple_uid (cand->incremented_at))
            continue;
          if (closest == NULL || uid > gimple_uid (closest->stmt))
            closest = use;
        }
      if (closest == NULL || !autoinc_possible_for_pair (data, closest, cand))
        continue;
      cand->ainc_use = closest;
    }
}
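/* For illustration: on a target with post-increment addressing, an access
   *p immediately followed by the increment p = p + 1 of an IP_ORIGINAL
   candidate can be combined into a single *p++ access.  AINC_USE records
   the use closest before the increment so that the address cost functions
   can credit this combination.  */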
/* Finds the candidates for the induction variables.  */

static void
find_iv_candidates (struct ivopts_data *data)
{
  /* Add commonly used ivs.  */
  add_standard_iv_candidates (data);

  /* Add old induction variables.  */
  add_old_ivs_candidates (data);

  /* Add induction variables derived from uses.  */
  add_derived_ivs_candidates (data);

  set_autoinc_for_original_candidates (data);

  /* Record the important candidates.  */
  record_important_candidates (data);
}
/* Determines costs of basing the use of the iv on an iv candidate.  */

static void
determine_use_iv_costs (struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_use *use;
  struct iv_cand *cand;
  bitmap to_clear = BITMAP_ALLOC (NULL);

  alloc_use_cost_map (data);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      if (data->consider_all_candidates)
        {
          for (j = 0; j < n_iv_cands (data); j++)
            {
              cand = iv_cand (data, j);
              determine_use_iv_cost (data, use, cand);
            }
        }
      else
        {
          bitmap_iterator bi;

          EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
            {
              cand = iv_cand (data, j);
              if (!determine_use_iv_cost (data, use, cand))
                bitmap_set_bit (to_clear, j);
            }

          /* Remove the candidates for which the cost is infinite from
             the list of related candidates.  */
          bitmap_and_compl_into (use->related_cands, to_clear);
          bitmap_clear (to_clear);
        }
    }

  BITMAP_FREE (to_clear);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Use-candidate costs:\n");

      for (i = 0; i < n_iv_uses (data); i++)
        {
          use = iv_use (data, i);

          fprintf (dump_file, "Use %d:\n", i);
          fprintf (dump_file, "  cand\tcost\tcompl.\tdepends on\n");
          for (j = 0; j < use->n_map_members; j++)
            {
              if (!use->cost_map[j].cand
                  || infinite_cost_p (use->cost_map[j].cost))
                continue;

              fprintf (dump_file, "  %d\t%d\t%d\t",
                       use->cost_map[j].cand->id,
                       use->cost_map[j].cost.cost,
                       use->cost_map[j].cost.complexity);
              if (use->cost_map[j].depends_on)
                bitmap_print (dump_file,
                              use->cost_map[j].depends_on, "","");
              if (use->cost_map[j].inv_expr_id != -1)
                fprintf (dump_file, " inv_expr:%d", use->cost_map[j].inv_expr_id);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "\n");
        }
      fprintf (dump_file, "\n");
    }
}
/* Determines cost of the candidate CAND.  */

static void
determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
{
  comp_cost cost_base;
  unsigned cost, cost_step;
  tree base;

  if (!cand->iv)
    {
      cand->cost = 0;
      return;
    }

  /* There are two costs associated with the candidate -- its increment
     and its initialization.  The second is almost negligible for any loop
     that rolls enough, so we take it just very little into account.  */

  base = cand->iv->base;
  cost_base = force_var_cost (data, base, NULL);
  /* It will be exceptional that the iv register happens to be initialized with
     the proper value at no cost.  In general, there will at least be a regcopy
     or a const set.  */
  if (cost_base.cost == 0)
    cost_base.cost = COSTS_N_INSNS (1);
  cost_step = add_cost (TYPE_MODE (TREE_TYPE (base)), data->speed);

  cost = cost_step + adjust_setup_cost (data, cost_base.cost);

  /* Prefer the original ivs unless we may gain something by replacing it.
     The reason is to make debugging simpler; so this is not relevant for
     artificial ivs created by other optimization passes.  */
  if (cand->pos != IP_ORIGINAL
      || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
    cost++;

  /* Prefer not to insert statements into latch unless there are some
     already (so that we do not create unnecessary jumps).  */
  if (cand->pos == IP_END
      && empty_block_p (ip_end_pos (data->current_loop)))
    cost++;

  cand->cost = cost;
  cand->cost_step = cost_step;
}
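/* Illustrative cost composition (numbers are made up): with an increment
   costing COSTS_N_INSNS (1) and a base setup also costing COSTS_N_INSNS (1),
   the candidate cost is the increment cost plus the setup cost amortized by
   adjust_setup_cost over the expected number of iterations, optionally
   bumped by one to penalize non-original candidates and increments placed
   into an empty latch.  */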
/* Determines costs of computation of the candidates.  */

static void
determine_iv_costs (struct ivopts_data *data)
{
  unsigned i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Candidate costs:\n");
      fprintf (dump_file, "  cand\tcost\n");
    }

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      determine_iv_cost (data, cand);

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "  %d\t%d\n", i, cand->cost);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n");
}
/* Calculates cost for having SIZE induction variables.  */

static unsigned
ivopts_global_cost_for_size (struct ivopts_data *data, unsigned size)
{
  /* We add size to the cost, so that we prefer eliminating ivs
     if possible.  */
  return size + estimate_reg_pressure_cost (size, data->regs_used, data->speed,
                                            data->body_includes_call);
}
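/* Roughly speaking (illustrative, not a contract of
   estimate_reg_pressure_cost): while SIZE plus the invariants already in
   use fits into the target's available registers, the estimate grows
   slowly and the "size +" term dominates, gently favoring smaller sets;
   once the sum exceeds the register file, spill costs start to be charged
   and quickly outweigh the use and candidate costs.  */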
/* For each size of the induction variable set determine the penalty.  */

static void
determine_set_costs (struct ivopts_data *data)
{
  unsigned j, n;
  gimple phi;
  gimple_stmt_iterator psi;
  tree op;
  struct loop *loop = data->current_loop;
  bitmap_iterator bi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Global costs:\n");
      fprintf (dump_file, "  target_avail_regs %d\n", target_avail_regs);
      fprintf (dump_file, "  target_clobbered_regs %d\n", target_clobbered_regs);
      fprintf (dump_file, "  target_reg_cost %d\n", target_reg_cost[data->speed]);
      fprintf (dump_file, "  target_spill_cost %d\n", target_spill_cost[data->speed]);
    }

  n = 0;
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      op = PHI_RESULT (phi);

      if (!is_gimple_reg (op))
        continue;

      if (get_iv (data, op))
        continue;

      n++;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info = ver_info (data, j);

      if (info->inv_id && info->has_nonlin_use)
        n++;
    }

  data->regs_used = n;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  regs_used %d\n", n);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  cost for size:\n");
      fprintf (dump_file, "  ivs\tcost\n");
      for (j = 0; j <= 2 * target_avail_regs; j++)
        fprintf (dump_file, "  %d\t%d\n", j,
                 ivopts_global_cost_for_size (data, j));
      fprintf (dump_file, "\n");
    }
}
/* Returns true if A is a cheaper cost pair than B.  */

static bool
cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
{
  int cmp;

  if (!a)
    return false;

  if (!b)
    return true;

  cmp = compare_costs (a->cost, b->cost);
  if (cmp < 0)
    return true;

  if (cmp > 0)
    return false;

  /* In case the costs are the same, prefer the cheaper candidate.  */
  if (a->cand->cost < b->cand->cost)
    return true;

  return false;
}
/* Returns the candidate by which USE is expressed in IVS.  */

static struct cost_pair *
iv_ca_cand_for_use (struct iv_ca *ivs, struct iv_use *use)
{
  return ivs->cand_for_use[use->id];
}

/* Computes the cost field of IVS structure.  */

static void
iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
{
  comp_cost cost = ivs->cand_use_cost;

  cost.cost += ivs->cand_cost;

  cost.cost += ivopts_global_cost_for_size (data,
                                            ivs->n_regs
                                            + ivs->num_used_inv_expr);

  ivs->cost = cost;
}
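/* The invariant maintained by the iv_ca_set_* functions below is, roughly,

     cost (IVS) = cand_use_cost                  (sum over expressed uses)
                  + cand_cost                    (sum over used candidates)
                  + global cost for n_regs + num_used_inv_expr

   so each incremental update only has to adjust the affected term and then
   call iv_ca_recount_cost, instead of recomputing the whole sum.  */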
/* Remove invariants in set INVS from set IVS.  */

static void
iv_ca_set_remove_invariants (struct iv_ca *ivs, bitmap invs)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      ivs->n_invariant_uses[iid]--;
      if (ivs->n_invariant_uses[iid] == 0)
        ivs->n_regs--;
    }
}
/* Set USE not to be expressed by any candidate in IVS.  */

static void
iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
                 struct iv_use *use)
{
  unsigned uid = use->id, cid;
  struct cost_pair *cp;

  cp = ivs->cand_for_use[uid];
  if (!cp)
    return;
  cid = cp->cand->id;

  ivs->bad_uses++;
  ivs->cand_for_use[uid] = NULL;
  ivs->n_cand_uses[cid]--;

  if (ivs->n_cand_uses[cid] == 0)
    {
      bitmap_clear_bit (ivs->cands, cid);
      /* Do not count the pseudocandidates.  */
      if (cp->cand->iv)
        ivs->n_regs--;
      ivs->n_cands--;
      ivs->cand_cost -= cp->cand->cost;

      iv_ca_set_remove_invariants (ivs, cp->cand->depends_on);
    }

  ivs->cand_use_cost = sub_costs (ivs->cand_use_cost, cp->cost);

  iv_ca_set_remove_invariants (ivs, cp->depends_on);

  if (cp->inv_expr_id != -1)
    {
      ivs->used_inv_expr[cp->inv_expr_id]--;
      if (ivs->used_inv_expr[cp->inv_expr_id] == 0)
        ivs->num_used_inv_expr--;
    }
  iv_ca_recount_cost (data, ivs);
}
/* Add invariants in set INVS to set IVS.  */

static void
iv_ca_set_add_invariants (struct iv_ca *ivs, bitmap invs)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      ivs->n_invariant_uses[iid]++;
      if (ivs->n_invariant_uses[iid] == 1)
        ivs->n_regs++;
    }
}
/* Set cost pair for USE in set IVS to CP.  */

static void
iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
              struct iv_use *use, struct cost_pair *cp)
{
  unsigned uid = use->id, cid;

  if (ivs->cand_for_use[uid] == cp)
    return;

  if (ivs->cand_for_use[uid])
    iv_ca_set_no_cp (data, ivs, use);

  if (cp)
    {
      cid = cp->cand->id;

      ivs->bad_uses--;
      ivs->cand_for_use[uid] = cp;
      ivs->n_cand_uses[cid]++;
      if (ivs->n_cand_uses[cid] == 1)
        {
          bitmap_set_bit (ivs->cands, cid);
          /* Do not count the pseudocandidates.  */
          if (cp->cand->iv)
            ivs->n_regs++;
          ivs->n_cands++;
          ivs->cand_cost += cp->cand->cost;

          iv_ca_set_add_invariants (ivs, cp->cand->depends_on);
        }

      ivs->cand_use_cost = add_costs (ivs->cand_use_cost, cp->cost);
      iv_ca_set_add_invariants (ivs, cp->depends_on);

      if (cp->inv_expr_id != -1)
        {
          ivs->used_inv_expr[cp->inv_expr_id]++;
          if (ivs->used_inv_expr[cp->inv_expr_id] == 1)
            ivs->num_used_inv_expr++;
        }
      iv_ca_recount_cost (data, ivs);
    }
}
/* Extend set IVS by expressing USE by some of the candidates in it
   if possible.  All important candidates will be considered
   if IMPORTANT_CANDIDATES is true.  */

static void
iv_ca_add_use (struct ivopts_data *data, struct iv_ca *ivs,
               struct iv_use *use, bool important_candidates)
{
  struct cost_pair *best_cp = NULL, *cp;
  bitmap_iterator bi;
  bitmap cands;
  unsigned i;

  gcc_assert (ivs->upto >= use->id);

  if (ivs->upto == use->id)
    {
      ivs->upto++;
      ivs->bad_uses++;
    }

  cands = (important_candidates ? data->important_candidates : ivs->cands);
  EXECUTE_IF_SET_IN_BITMAP (cands, 0, i, bi)
    {
      struct iv_cand *cand = iv_cand (data, i);

      cp = get_use_iv_cost (data, use, cand);

      if (cheaper_cost_pair (cp, best_cp))
        best_cp = cp;
    }

  iv_ca_set_cp (data, ivs, use, best_cp);
}
/* Get cost for assignment IVS.  */

static comp_cost
iv_ca_cost (struct iv_ca *ivs)
{
  /* This was a conditional expression but it triggered a bug in
     Sun C 5.5.  */
  if (ivs->bad_uses)
    return infinite_cost;
  else
    return ivs->cost;
}

/* Returns true if all dependences of CP are among invariants in IVS.  */

static bool
iv_ca_has_deps (struct iv_ca *ivs, struct cost_pair *cp)
{
  unsigned i;
  bitmap_iterator bi;

  if (!cp->depends_on)
    return true;

  EXECUTE_IF_SET_IN_BITMAP (cp->depends_on, 0, i, bi)
    {
      if (ivs->n_invariant_uses[i] == 0)
        return false;
    }

  return true;
}
/* Creates a change expressing USE by NEW_CP instead of OLD_CP and chains
   it before NEXT_CHANGE.  */

static struct iv_ca_delta *
iv_ca_delta_add (struct iv_use *use, struct cost_pair *old_cp,
                 struct cost_pair *new_cp, struct iv_ca_delta *next_change)
{
  struct iv_ca_delta *change = XNEW (struct iv_ca_delta);

  change->use = use;
  change->old_cp = old_cp;
  change->new_cp = new_cp;
  change->next_change = next_change;

  return change;
}

/* Joins two lists of changes L1 and L2.  Destructive -- old lists
   are rewritten.  */

static struct iv_ca_delta *
iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
{
  struct iv_ca_delta *last;

  if (!l2)
    return l1;

  if (!l1)
    return l2;

  for (last = l1; last->next_change; last = last->next_change)
    continue;
  last->next_change = l2;

  return l1;
}
/* Reverse the list of changes DELTA, forming the inverse to it.  */

static struct iv_ca_delta *
iv_ca_delta_reverse (struct iv_ca_delta *delta)
{
  struct iv_ca_delta *act, *next, *prev = NULL;
  struct cost_pair *tmp;

  for (act = delta; act; act = next)
    {
      next = act->next_change;
      act->next_change = prev;
      prev = act;

      tmp = act->old_cp;
      act->old_cp = act->new_cp;
      act->new_cp = tmp;
    }

  return prev;
}

/* Commit changes in DELTA to IVS.  If FORWARD is false, the changes are
   reverted instead.  */

static void
iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
                    struct iv_ca_delta *delta, bool forward)
{
  struct cost_pair *from, *to;
  struct iv_ca_delta *act;

  if (!forward)
    delta = iv_ca_delta_reverse (delta);

  for (act = delta; act; act = act->next_change)
    {
      from = act->old_cp;
      to = act->new_cp;
      gcc_assert (iv_ca_cand_for_use (ivs, act->use) == from);
      iv_ca_set_cp (data, ivs, act->use, to);
    }

  if (!forward)
    iv_ca_delta_reverse (delta);
}
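/* For illustration: the delta lists act as an undo log.  A typical
   caller does

     iv_ca_delta_commit (data, ivs, delta, true);    /\* apply *\/
     cost = iv_ca_cost (ivs);                        /\* measure *\/
     iv_ca_delta_commit (data, ivs, delta, false);   /\* revert *\/

   which lets the search evaluate a tentative change without copying the
   whole assignment (see iv_ca_extend and iv_ca_narrow below).  */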
/* Returns true if CAND is used in IVS.  */

static bool
iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
{
  return ivs->n_cand_uses[cand->id] > 0;
}

/* Returns number of induction variable candidates in the set IVS.  */

static unsigned
iv_ca_n_cands (struct iv_ca *ivs)
{
  return ivs->n_cands;
}

/* Free the list of changes DELTA.  */

static void
iv_ca_delta_free (struct iv_ca_delta **delta)
{
  struct iv_ca_delta *act, *next;

  for (act = *delta; act; act = next)
    {
      next = act->next_change;
      free (act);
    }

  *delta = NULL;
}
/* Allocates new iv candidates assignment.  */

static struct iv_ca *
iv_ca_new (struct ivopts_data *data)
{
  struct iv_ca *nw = XNEW (struct iv_ca);

  nw->upto = 0;
  nw->bad_uses = 0;
  nw->cand_for_use = XCNEWVEC (struct cost_pair *, n_iv_uses (data));
  nw->n_cand_uses = XCNEWVEC (unsigned, n_iv_cands (data));
  nw->cands = BITMAP_ALLOC (NULL);
  nw->n_cands = 0;
  nw->n_regs = 0;
  nw->cand_use_cost = zero_cost;
  nw->cand_cost = 0;
  nw->n_invariant_uses = XCNEWVEC (unsigned, data->max_inv_id + 1);
  nw->cost = zero_cost;
  nw->used_inv_expr = XCNEWVEC (unsigned, data->inv_expr_id + 1);
  nw->num_used_inv_expr = 0;

  return nw;
}

/* Free memory occupied by the set IVS.  */

static void
iv_ca_free (struct iv_ca **ivs)
{
  free ((*ivs)->cand_for_use);
  free ((*ivs)->n_cand_uses);
  BITMAP_FREE ((*ivs)->cands);
  free ((*ivs)->n_invariant_uses);
  free ((*ivs)->used_inv_expr);
  free (*ivs);
  *ivs = NULL;
}
/* Dumps IVS to FILE.  */

static void
iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
{
  const char *pref = "  invariants ";
  unsigned i;
  comp_cost cost = iv_ca_cost (ivs);

  fprintf (file, "  cost: %d (complexity %d)\n", cost.cost, cost.complexity);
  fprintf (file, "  cand_cost: %d\n  cand_use_cost: %d (complexity %d)\n",
           ivs->cand_cost, ivs->cand_use_cost.cost,
           ivs->cand_use_cost.complexity);
  bitmap_print (file, ivs->cands, "  candidates: ","\n");

  for (i = 0; i < ivs->upto; i++)
    {
      struct iv_use *use = iv_use (data, i);
      struct cost_pair *cp = iv_ca_cand_for_use (ivs, use);
      if (cp)
        fprintf (file, "   use:%d --> iv_cand:%d, cost=(%d,%d)\n",
                 use->id, cp->cand->id, cp->cost.cost, cp->cost.complexity);
      else
        fprintf (file, "   use:%d --> ??\n", use->id);
    }

  for (i = 1; i <= data->max_inv_id; i++)
    if (ivs->n_invariant_uses[i])
      {
        fprintf (file, "%s%d", pref, i);
        pref = ", ";
      }
  fprintf (file, "\n\n");
}
/* Try changing candidate in IVS to CAND for each use.  Return cost of the
   new set, and store differences in DELTA.  Number of induction variables
   in the new set is stored to N_IVS.  MIN_NCAND is a flag.  When it is true
   the function will try to find a solution with minimal iv candidates.  */

static comp_cost
iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
              struct iv_cand *cand, struct iv_ca_delta **delta,
              unsigned *n_ivs, bool min_ncand)
{
  unsigned i;
  comp_cost cost;
  struct iv_use *use;
  struct cost_pair *old_cp, *new_cp;

  *delta = NULL;
  for (i = 0; i < ivs->upto; i++)
    {
      use = iv_use (data, i);
      old_cp = iv_ca_cand_for_use (ivs, use);

      if (old_cp
          && old_cp->cand == cand)
        continue;

      new_cp = get_use_iv_cost (data, use, cand);
      if (!new_cp)
        continue;

      if (!min_ncand && !iv_ca_has_deps (ivs, new_cp))
        continue;

      if (!min_ncand && !cheaper_cost_pair (new_cp, old_cp))
        continue;

      *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  if (n_ivs)
    *n_ivs = iv_ca_n_cands (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}
/* Try narrowing set IVS by removing CAND.  Return the cost of
   the new set and store the differences in DELTA.  */

static comp_cost
iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
              struct iv_cand *cand, struct iv_ca_delta **delta)
{
  unsigned i, ci;
  struct iv_use *use;
  struct cost_pair *old_cp, *new_cp, *cp;
  bitmap_iterator bi;
  struct iv_cand *cnd;
  comp_cost cost;

  *delta = NULL;
  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);

      old_cp = iv_ca_cand_for_use (ivs, use);
      if (old_cp->cand != cand)
        continue;

      new_cp = NULL;

      if (data->consider_all_candidates)
        {
          EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
            {
              if (ci == cand->id)
                continue;

              cnd = iv_cand (data, ci);

              cp = get_use_iv_cost (data, use, cnd);
              if (!cp)
                continue;

              if (!iv_ca_has_deps (ivs, cp))
                continue;

              if (!cheaper_cost_pair (cp, new_cp))
                continue;

              new_cp = cp;
            }
        }
      else
        {
          EXECUTE_IF_AND_IN_BITMAP (use->related_cands, ivs->cands, 0, ci, bi)
            {
              if (ci == cand->id)
                continue;

              cnd = iv_cand (data, ci);

              cp = get_use_iv_cost (data, use, cnd);
              if (!cp)
                continue;

              if (!iv_ca_has_deps (ivs, cp))
                continue;

              if (!cheaper_cost_pair (cp, new_cp))
                continue;

              new_cp = cp;
            }
        }

      if (!new_cp)
        {
          iv_ca_delta_free (delta);
          return infinite_cost;
        }

      *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}
/* Try optimizing the set of candidates IVS by removing candidates other
   than EXCEPT_CAND from it.  Return cost of the new set, and store
   differences in DELTA.  */

static comp_cost
iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
             struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
  bitmap_iterator bi;
  struct iv_ca_delta *act_delta, *best_delta;
  unsigned i;
  comp_cost best_cost, acost;
  struct iv_cand *cand;

  best_delta = NULL;
  best_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      cand = iv_cand (data, i);

      if (cand == except_cand)
        continue;

      acost = iv_ca_narrow (data, ivs, cand, &act_delta);

      if (compare_costs (acost, best_cost) < 0)
        {
          best_cost = acost;
          iv_ca_delta_free (&best_delta);
          best_delta = act_delta;
        }
      else
        iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      *delta = NULL;
      return best_cost;
    }

  /* Recurse to possibly remove other unnecessary ivs.  */
  iv_ca_delta_commit (data, ivs, best_delta, true);
  best_cost = iv_ca_prune (data, ivs, except_cand, delta);
  iv_ca_delta_commit (data, ivs, best_delta, false);
  *delta = iv_ca_delta_join (best_delta, *delta);
  return best_cost;
}
/* Tries to extend the set IVS in the best possible way in order
   to express the USE.  If ORIGINALP is true, prefer candidates from
   the original set of IVs, otherwise favor important candidates not
   based on any memory object.  */

static bool
try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
                  struct iv_use *use, bool originalp)
{
  comp_cost best_cost, act_cost;
  unsigned i;
  bitmap_iterator bi;
  struct iv_cand *cand;
  struct iv_ca_delta *best_delta = NULL, *act_delta;
  struct cost_pair *cp;

  iv_ca_add_use (data, ivs, use, false);
  best_cost = iv_ca_cost (ivs);

  cp = iv_ca_cand_for_use (ivs, use);
  if (!cp)
    {
      ivs->upto--;
      ivs->bad_uses--;
      iv_ca_add_use (data, ivs, use, true);
      best_cost = iv_ca_cost (ivs);
      cp = iv_ca_cand_for_use (ivs, use);
    }
  if (cp)
    {
      best_delta = iv_ca_delta_add (use, NULL, cp, NULL);
      iv_ca_set_no_cp (data, ivs, use);
    }

  /* If ORIGINALP is true, try to find the original IV for the use.  Otherwise
     first try important candidates not based on any memory object.  Only if
     this fails, try the specific ones.  Rationale -- in loops with many
     variables the best choice often is to use just one generic biv.  If we
     added here many ivs specific to the uses, the optimization algorithm later
     would be likely to get stuck in a local minimum, thus causing us to create
     too many ivs.  The approach from few ivs to more seems more likely to be
     successful -- starting from few ivs, replacing an expensive use by a
     specific iv should always be a win.  */
  EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
    {
      cand = iv_cand (data, i);

      if (originalp && cand->pos != IP_ORIGINAL)
        continue;

      if (!originalp && cand->iv->base_object != NULL_TREE)
        continue;

      if (iv_ca_cand_used_p (ivs, cand))
        continue;

      cp = get_use_iv_cost (data, use, cand);
      if (!cp)
        continue;

      iv_ca_set_cp (data, ivs, use, cp);
      act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
                               true);
      iv_ca_set_no_cp (data, ivs, use);
      act_delta = iv_ca_delta_add (use, NULL, cp, act_delta);

      if (compare_costs (act_cost, best_cost) < 0)
        {
          best_cost = act_cost;

          iv_ca_delta_free (&best_delta);
          best_delta = act_delta;
        }
      else
        iv_ca_delta_free (&act_delta);
    }

  if (infinite_cost_p (best_cost))
    {
      for (i = 0; i < use->n_map_members; i++)
        {
          cp = use->cost_map + i;
          cand = cp->cand;
          if (!cand)
            continue;

          /* Already tried this.  */
          if (cand->important)
            {
              if (originalp && cand->pos == IP_ORIGINAL)
                continue;
              if (!originalp && cand->iv->base_object == NULL_TREE)
                continue;
            }

          if (iv_ca_cand_used_p (ivs, cand))
            continue;

          act_delta = NULL;
          iv_ca_set_cp (data, ivs, use, cp);
          act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
          iv_ca_set_no_cp (data, ivs, use);
          act_delta = iv_ca_delta_add (use, iv_ca_cand_for_use (ivs, use),
                                       cp, act_delta);

          if (compare_costs (act_cost, best_cost) < 0)
            {
              best_cost = act_cost;

              iv_ca_delta_free (&best_delta);
              best_delta = act_delta;
            }
          else
            iv_ca_delta_free (&act_delta);
        }
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  iv_ca_delta_free (&best_delta);

  return !infinite_cost_p (best_cost);
}
/* Finds an initial assignment of candidates to uses.  */

static struct iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *ivs = iv_ca_new (data);
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    if (!try_add_cand_for (data, ivs, iv_use (data, i), originalp))
      {
        iv_ca_free (&ivs);
        return NULL;
      }

  return ivs;
}
/* Tries to improve set of induction variables IVS.  */

static bool
try_improve_iv_set (struct ivopts_data *data, struct iv_ca *ivs)
{
  unsigned i, n_ivs;
  comp_cost acost, best_cost = iv_ca_cost (ivs);
  struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
  struct iv_cand *cand;

  /* Try extending the set of induction variables by one.  */
  for (i = 0; i < n_iv_cands (data); i++)
    {
      cand = iv_cand (data, i);

      if (iv_ca_cand_used_p (ivs, cand))
        continue;

      acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
      if (!act_delta)
        continue;

      /* If we successfully added the candidate and the set is small enough,
         try optimizing it by removing other candidates.  */
      if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
        {
          iv_ca_delta_commit (data, ivs, act_delta, true);
          acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
          iv_ca_delta_commit (data, ivs, act_delta, false);
          act_delta = iv_ca_delta_join (act_delta, tmp_delta);
        }

      if (compare_costs (acost, best_cost) < 0)
        {
          best_cost = acost;
          iv_ca_delta_free (&best_delta);
          best_delta = act_delta;
        }
      else
        iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      /* Try removing the candidates from the set instead.  */
      best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);

      /* Nothing more we can do.  */
      if (!best_delta)
        return false;
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  gcc_assert (compare_costs (best_cost, iv_ca_cost (ivs)) == 0);
  iv_ca_delta_free (&best_delta);
  return true;
}
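/* For illustration: a single improvement step above may, for example, add
   a pointer candidate that expresses two address uses more cheaply and
   then prune the now redundant biv it displaced; the step is kept only
   when the recomputed total cost of the set decreases.  */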
/* Attempts to find the optimal set of induction variables.  We use a simple
   greedy heuristic: we try to replace at most one candidate in the selected
   solution and remove the unused ivs while this improves the cost.  */

static struct iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *set;

  /* Get the initial solution.  */
  set = get_initial_solution (data, originalp);
  if (!set)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
      return NULL;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Initial set of candidates:\n");
      iv_ca_dump (data, dump_file, set);
    }

  while (try_improve_iv_set (data, set))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Improved to:\n");
          iv_ca_dump (data, dump_file, set);
        }
    }

  return set;
}
static struct iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
  unsigned i;
  struct iv_ca *set, *origset;
  struct iv_use *use;
  comp_cost cost, origcost;

  /* Determine the cost based on a strategy that starts with original IVs,
     and try again using a strategy that prefers candidates not based
     on any IVs.  */
  origset = find_optimal_iv_set_1 (data, true);
  set = find_optimal_iv_set_1 (data, false);

  if (!origset && !set)
    return NULL;

  origcost = origset ? iv_ca_cost (origset) : infinite_cost;
  cost = set ? iv_ca_cost (set) : infinite_cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Original cost %d (complexity %d)\n\n",
               origcost.cost, origcost.complexity);
      fprintf (dump_file, "Final cost %d (complexity %d)\n\n",
               cost.cost, cost.complexity);
    }

  /* Choose the one with the best cost.  */
  if (compare_costs (origcost, cost) <= 0)
    {
      if (set)
        iv_ca_free (&set);
      set = origset;
    }
  else if (origset)
    iv_ca_free (&origset);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);
      use->selected = iv_ca_cand_for_use (set, use)->cand;
    }

  return set;
}
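/* For illustration: the two runs above guard against the greedy search
   ending in different local minima.  Starting from the original bivs tends
   to win in small loops where keeping the existing variables is cheapest,
   while the object-independent strategy tends to win when one generic biv
   can serve many memory accesses; the cheaper of the two sets is kept.  */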
/* Creates a new induction variable corresponding to CAND.  */

static void
create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
{
  gimple_stmt_iterator incr_pos;
  tree base;
  bool after = false;

  if (!cand->iv)
    return;

  switch (cand->pos)
    {
    case IP_NORMAL:
      incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
      break;

    case IP_END:
      incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
      after = true;
      break;

    case IP_AFTER_USE:
      after = true;
      /* fall through */
    case IP_BEFORE_USE:
      incr_pos = gsi_for_stmt (cand->incremented_at);
      break;

    case IP_ORIGINAL:
      /* Mark that the iv is preserved.  */
      name_info (data, cand->var_before)->preserve_biv = true;
      name_info (data, cand->var_after)->preserve_biv = true;

      /* Rewrite the increment so that it uses var_before directly.  */
      find_interesting_uses_op (data, cand->var_after)->selected = cand;
      return;
    }

  gimple_add_tmp_var (cand->var_before);
  add_referenced_var (cand->var_before);

  base = unshare_expr (cand->iv->base);

  create_iv (base, unshare_expr (cand->iv->step),
             cand->var_before, data->current_loop,
             &incr_pos, after, &cand->var_before, &cand->var_after);
}
/* Creates new induction variables described in SET.  */

static void
create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
{
  unsigned i;
  struct iv_cand *cand;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
    {
      cand = iv_cand (data, i);
      create_new_iv (data, cand);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSelected IV set: \n");
      EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
        {
          cand = iv_cand (data, i);
          dump_cand (dump_file, cand);
        }
      fprintf (dump_file, "\n");
    }
}
/* Rewrites USE (definition of iv used in a nonlinear expression)
   using candidate CAND.  */

static void
rewrite_use_nonlinear_expr (struct ivopts_data *data,
                            struct iv_use *use, struct iv_cand *cand)
{
  tree comp;
  tree op, tgt;
  gimple ass;
  gimple_stmt_iterator bsi;

  /* An important special case -- if we are asked to express value of
     the original iv by itself, just exit; there is no need to
     introduce a new computation (that might also need casting the
     variable to unsigned and back).  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      tree step, ctype, utype;
      enum tree_code incr_code = PLUS_EXPR, old_code;

      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);

      step = cand->iv->step;
      ctype = TREE_TYPE (step);
      utype = TREE_TYPE (cand->var_after);
      if (TREE_CODE (step) == NEGATE_EXPR)
        {
          incr_code = MINUS_EXPR;
          step = TREE_OPERAND (step, 0);
        }

      /* Check whether we may leave the computation unchanged.
         This is the case only if it does not rely on other
         computations in the loop -- otherwise, the computation
         we rely upon may be removed in remove_unused_ivs,
         thus leading to ICE.  */
      old_code = gimple_assign_rhs_code (use->stmt);
      if (old_code == PLUS_EXPR
          || old_code == MINUS_EXPR
          || old_code == POINTER_PLUS_EXPR)
        {
          if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
            op = gimple_assign_rhs2 (use->stmt);
          else if (old_code != MINUS_EXPR
                   && gimple_assign_rhs2 (use->stmt) == cand->var_before)
            op = gimple_assign_rhs1 (use->stmt);
          else
            op = NULL_TREE;
        }
      else
        op = NULL_TREE;

      if (op
          && (TREE_CODE (op) == INTEGER_CST
              || operand_equal_p (op, step, 0)))
        return;

      /* Otherwise, add the necessary computations to express
         the iv.  */
      op = fold_convert (ctype, cand->var_before);
      comp = fold_convert (utype,
                           build2 (incr_code, ctype, op,
                                   unshare_expr (step)));
    }
  else
    {
      comp = get_computation (data->current_loop, use, cand);
      gcc_assert (comp != NULL_TREE);
    }

  switch (gimple_code (use->stmt))
    {
    case GIMPLE_PHI:
      tgt = PHI_RESULT (use->stmt);

      /* If we should keep the biv, do not replace it.  */
      if (name_info (data, tgt)->preserve_biv)
        return;

      bsi = gsi_after_labels (gimple_bb (use->stmt));
      break;

    case GIMPLE_ASSIGN:
      tgt = gimple_assign_lhs (use->stmt);
      bsi = gsi_for_stmt (use->stmt);
      break;

    default:
      gcc_unreachable ();
    }

  if (!valid_gimple_rhs_p (comp)
      || (gimple_code (use->stmt) != GIMPLE_PHI
          /* We can't allow re-allocating the stmt as it might be pointed
             to still.  */
          && (get_gimple_rhs_num_ops (TREE_CODE (comp))
              >= gimple_num_ops (gsi_stmt (bsi)))))
    {
      comp = force_gimple_operand_gsi (&bsi, comp, true, NULL_TREE,
                                       true, GSI_SAME_STMT);
      if (POINTER_TYPE_P (TREE_TYPE (tgt)))
        {
          duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
          /* As this isn't a plain copy we have to reset alignment
             information.  */
          if (SSA_NAME_PTR_INFO (comp))
            {
              SSA_NAME_PTR_INFO (comp)->align = 1;
              SSA_NAME_PTR_INFO (comp)->misalign = 0;
            }
        }
    }

  if (gimple_code (use->stmt) == GIMPLE_PHI)
    {
      ass = gimple_build_assign (tgt, comp);
      gsi_insert_before (&bsi, ass, GSI_SAME_STMT);

      bsi = gsi_for_stmt (use->stmt);
      remove_phi_node (&bsi, false);
    }
  else
    {
      gimple_assign_set_rhs_from_tree (&bsi, comp);
      use->stmt = gsi_stmt (bsi);
    }
}
/* Performs a peephole optimization to reorder the iv update statement with
   a mem ref to enable instruction combining in later phases.  The mem ref uses
   the iv value before the update, so the reordering transformation requires
   adjustment of the offset.  CAND is the selected IV_CAND.

   Example:

     t = MEM_REF (base, iv1, 8, 16);  // base, index, stride, offset
     iv2 = iv1 + 1;

     if (t < val)      (1)
       goto L;
     goto Head;

   Directly propagating t over to (1) will introduce an overlapping live
   range and thus increase register pressure.  This peephole transforms
   it into:

     iv2 = iv1 + 1;
     t = MEM_REF (base, iv2, 8, 8);
     if (t < val)
       goto L;
     goto Head;  */

static void
adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
{
  tree var_after;
  gimple iv_update, stmt;
  basic_block bb;
  gimple_stmt_iterator gsi, gsi_iv;

  if (cand->pos != IP_NORMAL)
    return;

  var_after = cand->var_after;
  iv_update = SSA_NAME_DEF_STMT (var_after);

  bb = gimple_bb (iv_update);
  gsi = gsi_last_nondebug_bb (bb);
  stmt = gsi_stmt (gsi);

  /* Only handle conditional statement for now.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return;

  gsi_prev_nondebug (&gsi);
  stmt = gsi_stmt (gsi);
  if (stmt != iv_update)
    return;

  gsi_prev_nondebug (&gsi);
  if (gsi_end_p (gsi))
    return;

  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  if (stmt != use->stmt)
    return;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reordering \n");
      print_gimple_stmt (dump_file, iv_update, 0, 0);
      print_gimple_stmt (dump_file, use->stmt, 0, 0);
      fprintf (dump_file, "\n");
    }

  gsi = gsi_for_stmt (use->stmt);
  gsi_iv = gsi_for_stmt (iv_update);
  gsi_move_before (&gsi_iv, &gsi);

  cand->pos = IP_BEFORE_USE;
  cand->incremented_at = use->stmt;
}
/* Rewrites USE (address that is an iv) using candidate CAND.  */

static void
rewrite_use_address (struct ivopts_data *data,
                     struct iv_use *use, struct iv_cand *cand)
{
  aff_tree aff;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  tree base_hint = NULL_TREE;
  tree ref, iv;
  bool ok;

  adjust_iv_update_pos (cand, use);
  ok = get_computation_aff (data->current_loop, use, cand, use->stmt, &aff);
  gcc_assert (ok);
  unshare_aff_combination (&aff);

  /* To avoid undefined overflow problems, all IV candidates use unsigned
     integer types.  The drawback is that this makes it impossible for
     create_mem_ref to distinguish an IV that is based on a memory object
     from one that represents simply an offset.

     To work around this problem, we pass a hint to create_mem_ref that
     indicates which variable (if any) in aff is an IV based on a memory
     object.  Note that we only consider the candidate.  If this is not
     based on an object, the base of the reference is in some subexpression
     of the use -- but these will use pointer types, so they are recognized
     by the create_mem_ref heuristics anyway.  */
  if (cand->iv->base_object)
    base_hint = var_at_stmt (data->current_loop, cand, use->stmt);

  iv = var_at_stmt (data->current_loop, cand, use->stmt);
  ref = create_mem_ref (&bsi, TREE_TYPE (*use->op_p), &aff,
                        reference_alias_ptr_type (*use->op_p),
                        iv, base_hint, data->speed);
  copy_ref_info (ref, *use->op_p);
  *use->op_p = ref;
}
/* Rewrites USE (the condition such that one of the arguments is an iv) using
   candidate CAND.  */

static void
rewrite_use_compare (struct ivopts_data *data,
                     struct iv_use *use, struct iv_cand *cand)
{
  tree comp, *var_p, op, bound;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  enum tree_code compare;
  struct cost_pair *cp = get_use_iv_cost (data, use, cand);
  bool ok;

  bound = cp->value;
  if (bound)
    {
      tree var = var_at_stmt (data->current_loop, cand, use->stmt);
      tree var_type = TREE_TYPE (var);
      gimple_seq stmts;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Replacing exit test: ");
          print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
        }
      compare = cp->comp;
      bound = unshare_expr (fold_convert (var_type, bound));
      op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (
                loop_preheader_edge (data->current_loop),
                stmts);

      gimple_cond_set_lhs (use->stmt, var);
      gimple_cond_set_code (use->stmt, compare);
      gimple_cond_set_rhs (use->stmt, op);
      return;
    }

  /* The induction variable elimination failed; just express the original
     giv.  */
  comp = get_computation (data->current_loop, use, cand);
  gcc_assert (comp != NULL_TREE);

  ok = extract_cond_operands (data, use->stmt, &var_p, NULL, NULL, NULL);
  gcc_assert (ok);

  *var_p = force_gimple_operand_gsi (&bsi, comp, true, SSA_NAME_VAR (*var_p),
                                     true, GSI_SAME_STMT);
}
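/* For illustration (hypothetical GIMPLE, not from the original sources):
   a successful elimination turns

     i_1 = PHI <0, i_2>;  ...  if (i_2 != n_3) goto loop;

   into

     _b = base_5 + n_3 * 4;   // computed once in the preheader
     ...  if (p_7 < _b) goto loop;

   so the original counter i can then be dropped by remove_unused_ivs.  */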
/* Rewrites USE using candidate CAND.  */

static void
rewrite_use (struct ivopts_data *data, struct iv_use *use, struct iv_cand *cand)
{
  switch (use->type)
    {
    case USE_NONLINEAR_EXPR:
      rewrite_use_nonlinear_expr (data, use, cand);
      break;

    case USE_ADDRESS:
      rewrite_use_address (data, use, cand);
      break;

    case USE_COMPARE:
      rewrite_use_compare (data, use, cand);
      break;

    default:
      gcc_unreachable ();
    }

  update_stmt (use->stmt);
}

/* Rewrite the uses using the selected induction variables.  */

static void
rewrite_uses (struct ivopts_data *data)
{
  struct iv_cand *cand;
  struct iv_use *use;
  unsigned i;

  for (i = 0; i < n_iv_uses (data); i++)
    {
      use = iv_use (data, i);
      cand = use->selected;
      gcc_assert (cand);

      rewrite_use (data, use, cand);
    }
}
/* Removes the ivs that are not used after rewriting.  */

static void
remove_unused_ivs (struct ivopts_data *data)
{
  unsigned j;
  bitmap_iterator bi;
  bitmap toremove = BITMAP_ALLOC (NULL);

  /* Figure out an order in which to release SSA DEFs so that we don't
     release something that we'd have to propagate into a debug stmt
     afterwards.  */
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info;

      info = ver_info (data, j);
      if (info->iv
          && !integer_zerop (info->iv->step)
          && !info->inv_id
          && !info->iv->have_use_for
          && !info->preserve_biv)
        bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));
    }

  release_defs_bitset (toremove);

  BITMAP_FREE (toremove);
}

/* Frees memory occupied by struct tree_niter_desc in *VALUE.  Callback
   for pointer_map_traverse.  */

static bool
free_tree_niter_desc (const void *key ATTRIBUTE_UNUSED, void **value,
                      void *data ATTRIBUTE_UNUSED)
{
  struct tree_niter_desc *const niter = (struct tree_niter_desc *) *value;

  free (niter);
  return true;
}
/* Frees data allocated by the optimization of a single loop.  */

static void
free_loop_data (struct ivopts_data *data)
{
  unsigned i, j;
  bitmap_iterator bi;
  tree obj;

  if (data->niters)
    {
      pointer_map_traverse (data->niters, free_tree_niter_desc, NULL);
      pointer_map_destroy (data->niters);
      data->niters = NULL;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      struct version_info *info;

      info = ver_info (data, i);
      if (info->iv)
        free (info->iv);
      info->iv = NULL;
      info->has_nonlin_use = false;
      info->preserve_biv = false;
      info->inv_id = 0;
    }
  bitmap_clear (data->relevant);
  bitmap_clear (data->important_candidates);

  for (i = 0; i < n_iv_uses (data); i++)
    {
      struct iv_use *use = iv_use (data, i);

      free (use->iv);
      BITMAP_FREE (use->related_cands);
      for (j = 0; j < use->n_map_members; j++)
        if (use->cost_map[j].depends_on)
          BITMAP_FREE (use->cost_map[j].depends_on);
      free (use->cost_map);
      free (use);
    }
  VEC_truncate (iv_use_p, data->iv_uses, 0);

  for (i = 0; i < n_iv_cands (data); i++)
    {
      struct iv_cand *cand = iv_cand (data, i);

      if (cand->iv)
        free (cand->iv);
      if (cand->depends_on)
        BITMAP_FREE (cand->depends_on);
      free (cand);
    }
  VEC_truncate (iv_cand_p, data->iv_candidates, 0);

  if (data->version_info_size < num_ssa_names)
    {
      data->version_info_size = 2 * num_ssa_names;
      free (data->version_info);
      data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
    }

  data->max_inv_id = 0;

  FOR_EACH_VEC_ELT (tree, decl_rtl_to_reset, i, obj)
    SET_DECL_RTL (obj, NULL_RTX);

  VEC_truncate (tree, decl_rtl_to_reset, 0);

  htab_empty (data->inv_expr_tab);
  data->inv_expr_id = 0;
}
/* Finalizes data structures used by the iv optimization pass.  */

static void
tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
{
  free_loop_data (data);
  free (data->version_info);
  BITMAP_FREE (data->relevant);
  BITMAP_FREE (data->important_candidates);

  VEC_free (tree, heap, decl_rtl_to_reset);
  VEC_free (iv_use_p, heap, data->iv_uses);
  VEC_free (iv_cand_p, heap, data->iv_candidates);
  htab_delete (data->inv_expr_tab);
}
/* Returns true if the loop body BODY includes any function calls.  */

static bool
loop_body_includes_call (basic_block *body, unsigned num_nodes)
{
  gimple_stmt_iterator gsi;
  unsigned i;

  for (i = 0; i < num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple stmt = gsi_stmt (gsi);
        if (is_gimple_call (stmt)
            && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
          return true;
      }
  return false;
}
/* Optimizes the LOOP.  Returns true if anything changed.  */

static bool
tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
{
  bool changed = false;
  struct iv_ca *iv_ca;
  edge exit = single_dom_exit (loop);
  basic_block *body;

  gcc_assert (!data->niters);
  data->current_loop = loop;
  data->speed = optimize_loop_for_speed_p (loop);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Processing loop %d\n", loop->num);

      if (exit)
        {
          fprintf (dump_file, "  single exit %d -> %d, exit condition ",
                   exit->src->index, exit->dest->index);
          print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
          fprintf (dump_file, "\n");
        }

      fprintf (dump_file, "\n");
    }

  body = get_loop_body (loop);
  data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
  renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
  free (body);

  data->loop_single_exit_p = exit != NULL && loop_only_exit_p (loop, exit);

  /* For each ssa name determines whether it behaves as an induction variable
     in some loop.  */
  if (!find_induction_variables (data))
    goto finish;

  /* Finds interesting uses (item 1).  */
  find_interesting_uses (data);
  if (n_iv_uses (data) > MAX_CONSIDERED_USES)
    goto finish;

  /* Finds candidates for the induction variables (item 2).  */
  find_iv_candidates (data);

  /* Calculates the costs (item 3, part 1).  */
  determine_iv_costs (data);
  determine_use_iv_costs (data);
  determine_set_costs (data);

  /* Find the optimal set of induction variables (item 3, part 2).  */
  iv_ca = find_optimal_iv_set (data);
  if (!iv_ca)
    goto finish;
  changed = true;

  /* Create the new induction variables (item 4, part 1).  */
  create_new_ivs (data, iv_ca);
  iv_ca_free (&iv_ca);

  /* Rewrite the uses (item 4, part 2).  */
  rewrite_uses (data);

  /* Remove the ivs that are unused after rewriting.  */
  remove_unused_ivs (data);

  /* We have changed the structure of induction variables; it might happen
     that definitions in the scev database refer to some of them that were
     eliminated.  */
  scev_reset ();

finish:
  free_loop_data (data);

  return changed;
}
/* Main entry point.  Optimizes induction variables in loops.  */

void
tree_ssa_iv_optimize (void)
{
  struct loop *loop;
  struct ivopts_data data;
  loop_iterator li;

  tree_ssa_iv_optimize_init (&data);

  /* Optimize the loops starting with the innermost ones.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        flow_loop_dump (loop, dump_file, NULL, 1);

      tree_ssa_iv_optimize_loop (&data, loop);
    }

  tree_ssa_iv_optimize_finalize (&data);
}