1 /* Induction variable optimizations.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This pass tries to find the optimal set of induction variables for the loop.
21 It optimizes just the basic linear induction variables (although adding
22 support for other types should not be too hard). It includes the
23 optimizations commonly known as strength reduction, induction variable
24 coalescing and induction variable elimination. It does it in the
27 1) The interesting uses of induction variables are found. This includes
29 -- uses of induction variables in non-linear expressions
30 -- addresses of arrays
31 -- comparisons of induction variables
33 2) Candidates for the induction variables are found. This includes
35 -- old induction variables
36 -- the variables defined by expressions derived from the "interesting
39 3) The optimal (w.r. to a cost function) set of variables is chosen. The
40 cost function assigns a cost to sets of induction variables and consists
43 -- The use costs. Each of the interesting uses chooses the best induction
44 variable in the set and adds its cost to the sum. The cost reflects
45 the time spent on modifying the induction variables value to be usable
46 for the given purpose (adding base and offset for arrays, etc.).
47 -- The variable costs. Each of the variables has a cost assigned that
48 reflects the costs associated with incrementing the value of the
49 variable. The original variables are somewhat preferred.
50 -- The set cost. Depending on the size of the set, extra cost may be
51 added to reflect register pressure.
53 All the costs are defined in a machine-specific way, using the target
54 hooks and machine descriptions to determine them.
56 4) The trees are transformed to use the new variables, the dead code is
59 All of this is done loop by loop. Doing it globally is theoretically
60 possible, it might give a better performance and it might enable us
61 to decide costs more precisely, but getting all the interactions right
62 would be complicated. */
66 #include "coretypes.h"
69 #include "stor-layout.h"
76 #include "hard-reg-set.h"
79 #include "dominance.h"
81 #include "basic-block.h"
82 #include "gimple-pretty-print.h"
84 #include "hash-table.h"
85 #include "tree-ssa-alias.h"
86 #include "internal-fn.h"
88 #include "gimple-expr.h"
92 #include "gimple-iterator.h"
93 #include "gimplify-me.h"
94 #include "gimple-ssa.h"
95 #include "plugin-api.h"
99 #include "tree-phinodes.h"
100 #include "ssa-iterators.h"
101 #include "stringpool.h"
102 #include "tree-ssanames.h"
103 #include "tree-ssa-loop-ivopts.h"
104 #include "tree-ssa-loop-manip.h"
105 #include "tree-ssa-loop-niter.h"
106 #include "tree-ssa-loop.h"
108 #include "tree-dfa.h"
109 #include "tree-ssa.h"
111 #include "tree-pass.h"
112 #include "insn-config.h"
113 #include "tree-chrec.h"
114 #include "tree-scalar-evolution.h"
117 #include "langhooks.h"
118 #include "tree-affine.h"
120 #include "tree-inline.h"
121 #include "tree-ssa-propagate.h"
123 #include "tree-ssa-address.h"
124 #include "builtins.h"
126 /* FIXME: Expressions are expanded to RTL in this pass to determine the
127 cost of different addressing modes. This should be moved to a TBD
128 interface between the GIMPLE and RTL worlds. */
132 /* The infinite cost. */
133 #define INFTY 10000000
135 #define AVG_LOOP_NITER(LOOP) 5
137 /* Returns the expected number of loop iterations for LOOP.
138 The average trip count is computed from profile data if it
141 static inline HOST_WIDE_INT
142 avg_loop_niter (struct loop
*loop
)
144 HOST_WIDE_INT niter
= estimated_stmt_executions_int (loop
);
146 return AVG_LOOP_NITER (loop
);
151 /* Representation of the induction variable. */
154 tree base
; /* Initial value of the iv. */
155 tree base_object
; /* A memory object to that the induction variable points. */
156 tree step
; /* Step of the iv (constant only). */
157 tree ssa_name
; /* The ssa name with the value. */
158 bool biv_p
; /* Is it a biv? */
159 bool have_use_for
; /* Do we already have a use for it? */
160 unsigned use_id
; /* The identifier in the use if it is the case. */
163 /* Per-ssa version information (induction variable descriptions, etc.). */
166 tree name
; /* The ssa name. */
167 struct iv
*iv
; /* Induction variable description. */
168 bool has_nonlin_use
; /* For a loop-level invariant, whether it is used in
169 an expression that is not an induction variable. */
170 bool preserve_biv
; /* For the original biv, whether to preserve it. */
171 unsigned inv_id
; /* Id of an invariant. */
177 USE_NONLINEAR_EXPR
, /* Use in a nonlinear expression. */
178 USE_ADDRESS
, /* Use in an address. */
179 USE_COMPARE
/* Use is a compare. */
182 /* Cost of a computation. */
185 int cost
; /* The runtime cost. */
186 unsigned complexity
; /* The estimate of the complexity of the code for
187 the computation (in no concrete units --
188 complexity field should be larger for more
189 complex expressions and addressing modes). */
192 static const comp_cost no_cost
= {0, 0};
193 static const comp_cost infinite_cost
= {INFTY
, INFTY
};
195 /* The candidate - cost pair. */
198 struct iv_cand
*cand
; /* The candidate. */
199 comp_cost cost
; /* The cost. */
200 bitmap depends_on
; /* The list of invariants that have to be
202 tree value
; /* For final value elimination, the expression for
203 the final value of the iv. For iv elimination,
204 the new bound to compare with. */
205 enum tree_code comp
; /* For iv elimination, the comparison. */
206 int inv_expr_id
; /* Loop invariant expression id. */
212 unsigned id
; /* The id of the use. */
213 enum use_type type
; /* Type of the use. */
214 struct iv
*iv
; /* The induction variable it is based on. */
215 gimple stmt
; /* Statement in that it occurs. */
216 tree
*op_p
; /* The place where it occurs. */
217 bitmap related_cands
; /* The set of "related" iv candidates, plus the common
220 unsigned n_map_members
; /* Number of candidates in the cost_map list. */
221 struct cost_pair
*cost_map
;
222 /* The costs wrto the iv candidates. */
224 struct iv_cand
*selected
;
225 /* The selected candidate. */
228 /* The position where the iv is computed. */
231 IP_NORMAL
, /* At the end, just before the exit condition. */
232 IP_END
, /* At the end of the latch block. */
233 IP_BEFORE_USE
, /* Immediately before a specific use. */
234 IP_AFTER_USE
, /* Immediately after a specific use. */
235 IP_ORIGINAL
/* The original biv. */
238 /* The induction variable candidate. */
241 unsigned id
; /* The number of the candidate. */
242 bool important
; /* Whether this is an "important" candidate, i.e. such
243 that it should be considered by all uses. */
244 ENUM_BITFIELD(iv_position
) pos
: 8; /* Where it is computed. */
245 gimple incremented_at
;/* For original biv, the statement where it is
247 tree var_before
; /* The variable used for it before increment. */
248 tree var_after
; /* The variable used for it after increment. */
249 struct iv
*iv
; /* The value of the candidate. NULL for
250 "pseudocandidate" used to indicate the possibility
251 to replace the final value of an iv by direct
252 computation of the value. */
253 unsigned cost
; /* Cost of the candidate. */
254 unsigned cost_step
; /* Cost of the candidate's increment operation. */
255 struct iv_use
*ainc_use
; /* For IP_{BEFORE,AFTER}_USE candidates, the place
256 where it is incremented. */
257 bitmap depends_on
; /* The list of invariants that are used in step of the
261 /* Loop invariant expression hashtable entry. */
262 struct iv_inv_expr_ent
269 /* The data used by the induction variable optimizations. */
271 typedef struct iv_use
*iv_use_p
;
273 typedef struct iv_cand
*iv_cand_p
;
275 /* Hashtable helpers. */
277 struct iv_inv_expr_hasher
: typed_free_remove
<iv_inv_expr_ent
>
279 typedef iv_inv_expr_ent value_type
;
280 typedef iv_inv_expr_ent compare_type
;
281 static inline hashval_t
hash (const value_type
*);
282 static inline bool equal (const value_type
*, const compare_type
*);
285 /* Hash function for loop invariant expressions. */
288 iv_inv_expr_hasher::hash (const value_type
*expr
)
293 /* Hash table equality function for expressions. */
296 iv_inv_expr_hasher::equal (const value_type
*expr1
, const compare_type
*expr2
)
298 return expr1
->hash
== expr2
->hash
299 && operand_equal_p (expr1
->expr
, expr2
->expr
, 0);
304 /* The currently optimized loop. */
305 struct loop
*current_loop
;
307 /* Numbers of iterations for all exits of the current loop. */
308 hash_map
<edge
, tree_niter_desc
*> *niters
;
310 /* Number of registers used in it. */
313 /* The size of version_info array allocated. */
314 unsigned version_info_size
;
316 /* The array of information for the ssa names. */
317 struct version_info
*version_info
;
319 /* The hashtable of loop invariant expressions created
321 hash_table
<iv_inv_expr_hasher
> *inv_expr_tab
;
323 /* Loop invariant expression id. */
326 /* The bitmap of indices in version_info whose value was changed. */
329 /* The uses of induction variables. */
330 vec
<iv_use_p
> iv_uses
;
332 /* The candidates. */
333 vec
<iv_cand_p
> iv_candidates
;
335 /* A bitmap of important candidates. */
336 bitmap important_candidates
;
338 /* Cache used by tree_to_aff_combination_expand. */
339 hash_map
<tree
, name_expansion
*> *name_expansion_cache
;
341 /* The maximum invariant id. */
344 /* Whether to consider just related and important candidates when replacing a
346 bool consider_all_candidates
;
348 /* Are we optimizing for speed? */
351 /* Whether the loop body includes any function calls. */
352 bool body_includes_call
;
354 /* Whether the loop body can only be exited via single exit. */
355 bool loop_single_exit_p
;
358 /* An assignment of iv candidates to uses. */
362 /* The number of uses covered by the assignment. */
365 /* Number of uses that cannot be expressed by the candidates in the set. */
368 /* Candidate assigned to a use, together with the related costs. */
369 struct cost_pair
**cand_for_use
;
371 /* Number of times each candidate is used. */
372 unsigned *n_cand_uses
;
374 /* The candidates used. */
377 /* The number of candidates in the set. */
380 /* Total number of registers needed. */
383 /* Total cost of expressing uses. */
384 comp_cost cand_use_cost
;
386 /* Total cost of candidates. */
389 /* Number of times each invariant is used. */
390 unsigned *n_invariant_uses
;
392 /* The array holding the number of uses of each loop
393 invariant expressions created by ivopt. */
394 unsigned *used_inv_expr
;
396 /* The number of created loop invariants. */
397 unsigned num_used_inv_expr
;
399 /* Total cost of the assignment. */
403 /* Difference of two iv candidate assignments. */
410 /* An old assignment (for rollback purposes). */
411 struct cost_pair
*old_cp
;
413 /* A new assignment. */
414 struct cost_pair
*new_cp
;
416 /* Next change in the list. */
417 struct iv_ca_delta
*next_change
;
420 /* Bound on number of candidates below that all candidates are considered. */
422 #define CONSIDER_ALL_CANDIDATES_BOUND \
423 ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))
425 /* If there are more iv occurrences, we just give up (it is quite unlikely that
426 optimizing such a loop would help, and it would take ages). */
428 #define MAX_CONSIDERED_USES \
429 ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))
431 /* If there are at most this number of ivs in the set, try removing unnecessary
432 ivs from the set always. */
434 #define ALWAYS_PRUNE_CAND_SET_BOUND \
435 ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
437 /* The list of trees for that the decl_rtl field must be reset is stored
440 static vec
<tree
> decl_rtl_to_reset
;
442 static comp_cost
force_expr_to_var_cost (tree
, bool);
444 /* Number of uses recorded in DATA. */
446 static inline unsigned
447 n_iv_uses (struct ivopts_data
*data
)
449 return data
->iv_uses
.length ();
452 /* Ith use recorded in DATA. */
454 static inline struct iv_use
*
455 iv_use (struct ivopts_data
*data
, unsigned i
)
457 return data
->iv_uses
[i
];
460 /* Number of candidates recorded in DATA. */
462 static inline unsigned
463 n_iv_cands (struct ivopts_data
*data
)
465 return data
->iv_candidates
.length ();
468 /* Ith candidate recorded in DATA. */
470 static inline struct iv_cand
*
471 iv_cand (struct ivopts_data
*data
, unsigned i
)
473 return data
->iv_candidates
[i
];
476 /* The single loop exit if it dominates the latch, NULL otherwise. */
479 single_dom_exit (struct loop
*loop
)
481 edge exit
= single_exit (loop
);
486 if (!just_once_each_iteration_p (loop
, exit
->src
))
492 /* Dumps information about the induction variable IV to FILE. */
495 dump_iv (FILE *file
, struct iv
*iv
)
499 fprintf (file
, "ssa name ");
500 print_generic_expr (file
, iv
->ssa_name
, TDF_SLIM
);
501 fprintf (file
, "\n");
504 fprintf (file
, " type ");
505 print_generic_expr (file
, TREE_TYPE (iv
->base
), TDF_SLIM
);
506 fprintf (file
, "\n");
510 fprintf (file
, " base ");
511 print_generic_expr (file
, iv
->base
, TDF_SLIM
);
512 fprintf (file
, "\n");
514 fprintf (file
, " step ");
515 print_generic_expr (file
, iv
->step
, TDF_SLIM
);
516 fprintf (file
, "\n");
520 fprintf (file
, " invariant ");
521 print_generic_expr (file
, iv
->base
, TDF_SLIM
);
522 fprintf (file
, "\n");
527 fprintf (file
, " base object ");
528 print_generic_expr (file
, iv
->base_object
, TDF_SLIM
);
529 fprintf (file
, "\n");
533 fprintf (file
, " is a biv\n");
536 /* Dumps information about the USE to FILE. */
539 dump_use (FILE *file
, struct iv_use
*use
)
541 fprintf (file
, "use %d\n", use
->id
);
545 case USE_NONLINEAR_EXPR
:
546 fprintf (file
, " generic\n");
550 fprintf (file
, " address\n");
554 fprintf (file
, " compare\n");
561 fprintf (file
, " in statement ");
562 print_gimple_stmt (file
, use
->stmt
, 0, 0);
563 fprintf (file
, "\n");
565 fprintf (file
, " at position ");
567 print_generic_expr (file
, *use
->op_p
, TDF_SLIM
);
568 fprintf (file
, "\n");
570 dump_iv (file
, use
->iv
);
572 if (use
->related_cands
)
574 fprintf (file
, " related candidates ");
575 dump_bitmap (file
, use
->related_cands
);
579 /* Dumps information about the uses to FILE. */
582 dump_uses (FILE *file
, struct ivopts_data
*data
)
587 for (i
= 0; i
< n_iv_uses (data
); i
++)
589 use
= iv_use (data
, i
);
591 dump_use (file
, use
);
592 fprintf (file
, "\n");
596 /* Dumps information about induction variable candidate CAND to FILE. */
599 dump_cand (FILE *file
, struct iv_cand
*cand
)
601 struct iv
*iv
= cand
->iv
;
603 fprintf (file
, "candidate %d%s\n",
604 cand
->id
, cand
->important
? " (important)" : "");
606 if (cand
->depends_on
)
608 fprintf (file
, " depends on ");
609 dump_bitmap (file
, cand
->depends_on
);
614 fprintf (file
, " final value replacement\n");
618 if (cand
->var_before
)
620 fprintf (file
, " var_before ");
621 print_generic_expr (file
, cand
->var_before
, TDF_SLIM
);
622 fprintf (file
, "\n");
626 fprintf (file
, " var_after ");
627 print_generic_expr (file
, cand
->var_after
, TDF_SLIM
);
628 fprintf (file
, "\n");
634 fprintf (file
, " incremented before exit test\n");
638 fprintf (file
, " incremented before use %d\n", cand
->ainc_use
->id
);
642 fprintf (file
, " incremented after use %d\n", cand
->ainc_use
->id
);
646 fprintf (file
, " incremented at end\n");
650 fprintf (file
, " original biv\n");
657 /* Returns the info for ssa version VER. */
659 static inline struct version_info
*
660 ver_info (struct ivopts_data
*data
, unsigned ver
)
662 return data
->version_info
+ ver
;
665 /* Returns the info for ssa name NAME. */
667 static inline struct version_info
*
668 name_info (struct ivopts_data
*data
, tree name
)
670 return ver_info (data
, SSA_NAME_VERSION (name
));
673 /* Returns true if STMT is after the place where the IP_NORMAL ivs will be
677 stmt_after_ip_normal_pos (struct loop
*loop
, gimple stmt
)
679 basic_block bb
= ip_normal_pos (loop
), sbb
= gimple_bb (stmt
);
683 if (sbb
== loop
->latch
)
689 return stmt
== last_stmt (bb
);
692 /* Returns true if STMT if after the place where the original induction
693 variable CAND is incremented. If TRUE_IF_EQUAL is set, we return true
694 if the positions are identical. */
697 stmt_after_inc_pos (struct iv_cand
*cand
, gimple stmt
, bool true_if_equal
)
699 basic_block cand_bb
= gimple_bb (cand
->incremented_at
);
700 basic_block stmt_bb
= gimple_bb (stmt
);
702 if (!dominated_by_p (CDI_DOMINATORS
, stmt_bb
, cand_bb
))
705 if (stmt_bb
!= cand_bb
)
709 && gimple_uid (stmt
) == gimple_uid (cand
->incremented_at
))
711 return gimple_uid (stmt
) > gimple_uid (cand
->incremented_at
);
714 /* Returns true if STMT if after the place where the induction variable
715 CAND is incremented in LOOP. */
718 stmt_after_increment (struct loop
*loop
, struct iv_cand
*cand
, gimple stmt
)
726 return stmt_after_ip_normal_pos (loop
, stmt
);
730 return stmt_after_inc_pos (cand
, stmt
, false);
733 return stmt_after_inc_pos (cand
, stmt
, true);
740 /* Returns true if EXP is a ssa name that occurs in an abnormal phi node. */
743 abnormal_ssa_name_p (tree exp
)
748 if (TREE_CODE (exp
) != SSA_NAME
)
751 return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp
) != 0;
754 /* Returns false if BASE or INDEX contains a ssa name that occurs in an
755 abnormal phi node. Callback for for_each_index. */
758 idx_contains_abnormal_ssa_name_p (tree base
, tree
*index
,
759 void *data ATTRIBUTE_UNUSED
)
761 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
763 if (abnormal_ssa_name_p (TREE_OPERAND (base
, 2)))
765 if (abnormal_ssa_name_p (TREE_OPERAND (base
, 3)))
769 return !abnormal_ssa_name_p (*index
);
772 /* Returns true if EXPR contains a ssa name that occurs in an
773 abnormal phi node. */
776 contains_abnormal_ssa_name_p (tree expr
)
779 enum tree_code_class codeclass
;
784 code
= TREE_CODE (expr
);
785 codeclass
= TREE_CODE_CLASS (code
);
787 if (code
== SSA_NAME
)
788 return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr
) != 0;
790 if (code
== INTEGER_CST
791 || is_gimple_min_invariant (expr
))
794 if (code
== ADDR_EXPR
)
795 return !for_each_index (&TREE_OPERAND (expr
, 0),
796 idx_contains_abnormal_ssa_name_p
,
799 if (code
== COND_EXPR
)
800 return contains_abnormal_ssa_name_p (TREE_OPERAND (expr
, 0))
801 || contains_abnormal_ssa_name_p (TREE_OPERAND (expr
, 1))
802 || contains_abnormal_ssa_name_p (TREE_OPERAND (expr
, 2));
808 if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr
, 1)))
813 if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr
, 0)))
825 /* Returns the structure describing number of iterations determined from
826 EXIT of DATA->current_loop, or NULL if something goes wrong. */
828 static struct tree_niter_desc
*
829 niter_for_exit (struct ivopts_data
*data
, edge exit
)
831 struct tree_niter_desc
*desc
;
832 tree_niter_desc
**slot
;
836 data
->niters
= new hash_map
<edge
, tree_niter_desc
*>;
840 slot
= data
->niters
->get (exit
);
844 /* Try to determine number of iterations. We cannot safely work with ssa
845 names that appear in phi nodes on abnormal edges, so that we do not
846 create overlapping life ranges for them (PR 27283). */
847 desc
= XNEW (struct tree_niter_desc
);
848 if (!number_of_iterations_exit (data
->current_loop
,
850 || contains_abnormal_ssa_name_p (desc
->niter
))
855 data
->niters
->put (exit
, desc
);
863 /* Returns the structure describing number of iterations determined from
864 single dominating exit of DATA->current_loop, or NULL if something
867 static struct tree_niter_desc
*
868 niter_for_single_dom_exit (struct ivopts_data
*data
)
870 edge exit
= single_dom_exit (data
->current_loop
);
875 return niter_for_exit (data
, exit
);
878 /* Initializes data structures used by the iv optimization pass, stored
882 tree_ssa_iv_optimize_init (struct ivopts_data
*data
)
884 data
->version_info_size
= 2 * num_ssa_names
;
885 data
->version_info
= XCNEWVEC (struct version_info
, data
->version_info_size
);
886 data
->relevant
= BITMAP_ALLOC (NULL
);
887 data
->important_candidates
= BITMAP_ALLOC (NULL
);
888 data
->max_inv_id
= 0;
890 data
->iv_uses
.create (20);
891 data
->iv_candidates
.create (20);
892 data
->inv_expr_tab
= new hash_table
<iv_inv_expr_hasher
> (10);
893 data
->inv_expr_id
= 0;
894 data
->name_expansion_cache
= NULL
;
895 decl_rtl_to_reset
.create (20);
898 /* Returns a memory object to that EXPR points. In case we are able to
899 determine that it does not point to any such object, NULL is returned. */
902 determine_base_object (tree expr
)
904 enum tree_code code
= TREE_CODE (expr
);
907 /* If this is a pointer casted to any type, we need to determine
908 the base object for the pointer; so handle conversions before
909 throwing away non-pointer expressions. */
910 if (CONVERT_EXPR_P (expr
))
911 return determine_base_object (TREE_OPERAND (expr
, 0));
913 if (!POINTER_TYPE_P (TREE_TYPE (expr
)))
922 obj
= TREE_OPERAND (expr
, 0);
923 base
= get_base_address (obj
);
928 if (TREE_CODE (base
) == MEM_REF
)
929 return determine_base_object (TREE_OPERAND (base
, 0));
931 return fold_convert (ptr_type_node
,
932 build_fold_addr_expr (base
));
934 case POINTER_PLUS_EXPR
:
935 return determine_base_object (TREE_OPERAND (expr
, 0));
939 /* Pointer addition is done solely using POINTER_PLUS_EXPR. */
943 return fold_convert (ptr_type_node
, expr
);
947 /* Return true if address expression with non-DECL_P operand appears
951 contain_complex_addr_expr (tree expr
)
956 switch (TREE_CODE (expr
))
958 case POINTER_PLUS_EXPR
:
961 res
|= contain_complex_addr_expr (TREE_OPERAND (expr
, 0));
962 res
|= contain_complex_addr_expr (TREE_OPERAND (expr
, 1));
966 return (!DECL_P (TREE_OPERAND (expr
, 0)));
975 /* Allocates an induction variable with given initial value BASE and step STEP
979 alloc_iv (tree base
, tree step
)
982 struct iv
*iv
= XCNEW (struct iv
);
983 gcc_assert (step
!= NULL_TREE
);
985 /* Lower address expression in base except ones with DECL_P as operand.
987 1) More accurate cost can be computed for address expressions;
988 2) Duplicate candidates won't be created for bases in different
989 forms, like &a[0] and &a. */
991 if ((TREE_CODE (expr
) == ADDR_EXPR
&& !DECL_P (TREE_OPERAND (expr
, 0)))
992 || contain_complex_addr_expr (expr
))
995 tree_to_aff_combination (expr
, TREE_TYPE (base
), &comb
);
996 base
= fold_convert (TREE_TYPE (base
), aff_combination_to_tree (&comb
));
1000 iv
->base_object
= determine_base_object (base
);
1003 iv
->have_use_for
= false;
1005 iv
->ssa_name
= NULL_TREE
;
1010 /* Sets STEP and BASE for induction variable IV. */
1013 set_iv (struct ivopts_data
*data
, tree iv
, tree base
, tree step
)
1015 struct version_info
*info
= name_info (data
, iv
);
1017 gcc_assert (!info
->iv
);
1019 bitmap_set_bit (data
->relevant
, SSA_NAME_VERSION (iv
));
1020 info
->iv
= alloc_iv (base
, step
);
1021 info
->iv
->ssa_name
= iv
;
1024 /* Finds induction variable declaration for VAR. */
1027 get_iv (struct ivopts_data
*data
, tree var
)
1030 tree type
= TREE_TYPE (var
);
1032 if (!POINTER_TYPE_P (type
)
1033 && !INTEGRAL_TYPE_P (type
))
1036 if (!name_info (data
, var
)->iv
)
1038 bb
= gimple_bb (SSA_NAME_DEF_STMT (var
));
1041 || !flow_bb_inside_loop_p (data
->current_loop
, bb
))
1042 set_iv (data
, var
, var
, build_int_cst (type
, 0));
1045 return name_info (data
, var
)->iv
;
1048 /* Determines the step of a biv defined in PHI. Returns NULL if PHI does
1049 not define a simple affine biv with nonzero step. */
1052 determine_biv_step (gphi
*phi
)
1054 struct loop
*loop
= gimple_bb (phi
)->loop_father
;
1055 tree name
= PHI_RESULT (phi
);
1058 if (virtual_operand_p (name
))
1061 if (!simple_iv (loop
, loop
, name
, &iv
, true))
1064 return integer_zerop (iv
.step
) ? NULL_TREE
: iv
.step
;
1067 /* Finds basic ivs. */
1070 find_bivs (struct ivopts_data
*data
)
1073 tree step
, type
, base
;
1075 struct loop
*loop
= data
->current_loop
;
1078 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
1082 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi
)))
1085 step
= determine_biv_step (phi
);
1089 base
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_preheader_edge (loop
));
1090 base
= expand_simple_operations (base
);
1091 if (contains_abnormal_ssa_name_p (base
)
1092 || contains_abnormal_ssa_name_p (step
))
1095 type
= TREE_TYPE (PHI_RESULT (phi
));
1096 base
= fold_convert (type
, base
);
1099 if (POINTER_TYPE_P (type
))
1100 step
= convert_to_ptrofftype (step
);
1102 step
= fold_convert (type
, step
);
1105 set_iv (data
, PHI_RESULT (phi
), base
, step
);
1112 /* Marks basic ivs. */
1115 mark_bivs (struct ivopts_data
*data
)
1120 struct iv
*iv
, *incr_iv
;
1121 struct loop
*loop
= data
->current_loop
;
1122 basic_block incr_bb
;
1125 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
1129 iv
= get_iv (data
, PHI_RESULT (phi
));
1133 var
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_latch_edge (loop
));
1134 def
= SSA_NAME_DEF_STMT (var
);
1135 /* Don't mark iv peeled from other one as biv. */
1137 && gimple_code (def
) == GIMPLE_PHI
1138 && gimple_bb (def
) == loop
->header
)
1141 incr_iv
= get_iv (data
, var
);
1145 /* If the increment is in the subloop, ignore it. */
1146 incr_bb
= gimple_bb (SSA_NAME_DEF_STMT (var
));
1147 if (incr_bb
->loop_father
!= data
->current_loop
1148 || (incr_bb
->flags
& BB_IRREDUCIBLE_LOOP
))
1152 incr_iv
->biv_p
= true;
1156 /* Checks whether STMT defines a linear induction variable and stores its
1157 parameters to IV. */
1160 find_givs_in_stmt_scev (struct ivopts_data
*data
, gimple stmt
, affine_iv
*iv
)
1163 struct loop
*loop
= data
->current_loop
;
1165 iv
->base
= NULL_TREE
;
1166 iv
->step
= NULL_TREE
;
1168 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
1171 lhs
= gimple_assign_lhs (stmt
);
1172 if (TREE_CODE (lhs
) != SSA_NAME
)
1175 if (!simple_iv (loop
, loop_containing_stmt (stmt
), lhs
, iv
, true))
1177 iv
->base
= expand_simple_operations (iv
->base
);
1179 if (contains_abnormal_ssa_name_p (iv
->base
)
1180 || contains_abnormal_ssa_name_p (iv
->step
))
1183 /* If STMT could throw, then do not consider STMT as defining a GIV.
1184 While this will suppress optimizations, we can not safely delete this
1185 GIV and associated statements, even if it appears it is not used. */
1186 if (stmt_could_throw_p (stmt
))
1192 /* Finds general ivs in statement STMT. */
1195 find_givs_in_stmt (struct ivopts_data
*data
, gimple stmt
)
1199 if (!find_givs_in_stmt_scev (data
, stmt
, &iv
))
1202 set_iv (data
, gimple_assign_lhs (stmt
), iv
.base
, iv
.step
);
1205 /* Finds general ivs in basic block BB. */
1208 find_givs_in_bb (struct ivopts_data
*data
, basic_block bb
)
1210 gimple_stmt_iterator bsi
;
1212 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
1213 find_givs_in_stmt (data
, gsi_stmt (bsi
));
1216 /* Finds general ivs. */
1219 find_givs (struct ivopts_data
*data
)
1221 struct loop
*loop
= data
->current_loop
;
1222 basic_block
*body
= get_loop_body_in_dom_order (loop
);
1225 for (i
= 0; i
< loop
->num_nodes
; i
++)
1226 find_givs_in_bb (data
, body
[i
]);
1230 /* For each ssa name defined in LOOP determines whether it is an induction
1231 variable and if so, its initial value and step. */
1234 find_induction_variables (struct ivopts_data
*data
)
1239 if (!find_bivs (data
))
1245 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1247 struct tree_niter_desc
*niter
= niter_for_single_dom_exit (data
);
1251 fprintf (dump_file
, " number of iterations ");
1252 print_generic_expr (dump_file
, niter
->niter
, TDF_SLIM
);
1253 if (!integer_zerop (niter
->may_be_zero
))
1255 fprintf (dump_file
, "; zero if ");
1256 print_generic_expr (dump_file
, niter
->may_be_zero
, TDF_SLIM
);
1258 fprintf (dump_file
, "\n\n");
1261 fprintf (dump_file
, "Induction variables:\n\n");
1263 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
1265 if (ver_info (data
, i
)->iv
)
1266 dump_iv (dump_file
, ver_info (data
, i
)->iv
);
1273 /* Records a use of type USE_TYPE at *USE_P in STMT whose value is IV. */
1275 static struct iv_use
*
1276 record_use (struct ivopts_data
*data
, tree
*use_p
, struct iv
*iv
,
1277 gimple stmt
, enum use_type use_type
)
1279 struct iv_use
*use
= XCNEW (struct iv_use
);
1281 use
->id
= n_iv_uses (data
);
1282 use
->type
= use_type
;
1286 use
->related_cands
= BITMAP_ALLOC (NULL
);
1288 /* To avoid showing ssa name in the dumps, if it was not reset by the
1290 iv
->ssa_name
= NULL_TREE
;
1292 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1293 dump_use (dump_file
, use
);
1295 data
->iv_uses
.safe_push (use
);
1300 /* Checks whether OP is a loop-level invariant and if so, records it.
1301 NONLINEAR_USE is true if the invariant is used in a way we do not
1302 handle specially. */
1305 record_invariant (struct ivopts_data
*data
, tree op
, bool nonlinear_use
)
1308 struct version_info
*info
;
1310 if (TREE_CODE (op
) != SSA_NAME
1311 || virtual_operand_p (op
))
1314 bb
= gimple_bb (SSA_NAME_DEF_STMT (op
));
1316 && flow_bb_inside_loop_p (data
->current_loop
, bb
))
1319 info
= name_info (data
, op
);
1321 info
->has_nonlin_use
|= nonlinear_use
;
1323 info
->inv_id
= ++data
->max_inv_id
;
1324 bitmap_set_bit (data
->relevant
, SSA_NAME_VERSION (op
));
1327 /* Checks whether the use OP is interesting and if so, records it. */
1329 static struct iv_use
*
1330 find_interesting_uses_op (struct ivopts_data
*data
, tree op
)
1337 if (TREE_CODE (op
) != SSA_NAME
)
1340 iv
= get_iv (data
, op
);
1344 if (iv
->have_use_for
)
1346 use
= iv_use (data
, iv
->use_id
);
1348 gcc_assert (use
->type
== USE_NONLINEAR_EXPR
);
1352 if (integer_zerop (iv
->step
))
1354 record_invariant (data
, op
, true);
1357 iv
->have_use_for
= true;
1359 civ
= XNEW (struct iv
);
1362 stmt
= SSA_NAME_DEF_STMT (op
);
1363 gcc_assert (gimple_code (stmt
) == GIMPLE_PHI
1364 || is_gimple_assign (stmt
));
1366 use
= record_use (data
, NULL
, civ
, stmt
, USE_NONLINEAR_EXPR
);
1367 iv
->use_id
= use
->id
;
1372 /* Given a condition in statement STMT, checks whether it is a compare
1373 of an induction variable and an invariant. If this is the case,
1374 CONTROL_VAR is set to location of the iv, BOUND to the location of
1375 the invariant, IV_VAR and IV_BOUND are set to the corresponding
1376 induction variable descriptions, and true is returned. If this is not
1377 the case, CONTROL_VAR and BOUND are set to the arguments of the
1378 condition and false is returned. */
1381 extract_cond_operands (struct ivopts_data
*data
, gimple stmt
,
1382 tree
**control_var
, tree
**bound
,
1383 struct iv
**iv_var
, struct iv
**iv_bound
)
1385 /* The objects returned when COND has constant operands. */
1386 static struct iv const_iv
;
1388 tree
*op0
= &zero
, *op1
= &zero
, *tmp_op
;
1389 struct iv
*iv0
= &const_iv
, *iv1
= &const_iv
, *tmp_iv
;
1392 if (gimple_code (stmt
) == GIMPLE_COND
)
1394 gcond
*cond_stmt
= as_a
<gcond
*> (stmt
);
1395 op0
= gimple_cond_lhs_ptr (cond_stmt
);
1396 op1
= gimple_cond_rhs_ptr (cond_stmt
);
1400 op0
= gimple_assign_rhs1_ptr (stmt
);
1401 op1
= gimple_assign_rhs2_ptr (stmt
);
1404 zero
= integer_zero_node
;
1405 const_iv
.step
= integer_zero_node
;
1407 if (TREE_CODE (*op0
) == SSA_NAME
)
1408 iv0
= get_iv (data
, *op0
);
1409 if (TREE_CODE (*op1
) == SSA_NAME
)
1410 iv1
= get_iv (data
, *op1
);
1412 /* Exactly one of the compared values must be an iv, and the other one must
1417 if (integer_zerop (iv0
->step
))
1419 /* Control variable may be on the other side. */
1420 tmp_op
= op0
; op0
= op1
; op1
= tmp_op
;
1421 tmp_iv
= iv0
; iv0
= iv1
; iv1
= tmp_iv
;
1423 ret
= !integer_zerop (iv0
->step
) && integer_zerop (iv1
->step
);
1427 *control_var
= op0
;;
1438 /* Checks whether the condition in STMT is interesting and if so,
1442 find_interesting_uses_cond (struct ivopts_data
*data
, gimple stmt
)
1444 tree
*var_p
, *bound_p
;
1445 struct iv
*var_iv
, *civ
;
1447 if (!extract_cond_operands (data
, stmt
, &var_p
, &bound_p
, &var_iv
, NULL
))
1449 find_interesting_uses_op (data
, *var_p
);
1450 find_interesting_uses_op (data
, *bound_p
);
1454 civ
= XNEW (struct iv
);
1456 record_use (data
, NULL
, civ
, stmt
, USE_COMPARE
);
1459 /* Returns the outermost loop EXPR is obviously invariant in
1460 relative to the loop LOOP, i.e. if all its operands are defined
1461 outside of the returned loop. Returns NULL if EXPR is not
1462 even obviously invariant in LOOP. */
1465 outermost_invariant_loop_for_expr (struct loop
*loop
, tree expr
)
1470 if (is_gimple_min_invariant (expr
))
1471 return current_loops
->tree_root
;
1473 if (TREE_CODE (expr
) == SSA_NAME
)
1475 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (expr
));
1478 if (flow_bb_inside_loop_p (loop
, def_bb
))
1480 return superloop_at_depth (loop
,
1481 loop_depth (def_bb
->loop_father
) + 1);
1484 return current_loops
->tree_root
;
1490 unsigned maxdepth
= 0;
1491 len
= TREE_OPERAND_LENGTH (expr
);
1492 for (i
= 0; i
< len
; i
++)
1494 struct loop
*ivloop
;
1495 if (!TREE_OPERAND (expr
, i
))
1498 ivloop
= outermost_invariant_loop_for_expr (loop
, TREE_OPERAND (expr
, i
));
1501 maxdepth
= MAX (maxdepth
, loop_depth (ivloop
));
1504 return superloop_at_depth (loop
, maxdepth
);
1507 /* Returns true if expression EXPR is obviously invariant in LOOP,
1508 i.e. if all its operands are defined outside of the LOOP. LOOP
1509 should not be the function body. */
1512 expr_invariant_in_loop_p (struct loop
*loop
, tree expr
)
1517 gcc_assert (loop_depth (loop
) > 0);
1519 if (is_gimple_min_invariant (expr
))
1522 if (TREE_CODE (expr
) == SSA_NAME
)
1524 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (expr
));
1526 && flow_bb_inside_loop_p (loop
, def_bb
))
1535 len
= TREE_OPERAND_LENGTH (expr
);
1536 for (i
= 0; i
< len
; i
++)
1537 if (TREE_OPERAND (expr
, i
)
1538 && !expr_invariant_in_loop_p (loop
, TREE_OPERAND (expr
, i
)))
1544 /* Cumulates the steps of indices into DATA and replaces their values with the
1545 initial ones. Returns false when the value of the index cannot be determined.
1546 Callback for for_each_index. */
1548 struct ifs_ivopts_data
1550 struct ivopts_data
*ivopts_data
;
1556 idx_find_step (tree base
, tree
*idx
, void *data
)
1558 struct ifs_ivopts_data
*dta
= (struct ifs_ivopts_data
*) data
;
1560 tree step
, iv_base
, iv_step
, lbound
, off
;
1561 struct loop
*loop
= dta
->ivopts_data
->current_loop
;
1563 /* If base is a component ref, require that the offset of the reference
1565 if (TREE_CODE (base
) == COMPONENT_REF
)
1567 off
= component_ref_field_offset (base
);
1568 return expr_invariant_in_loop_p (loop
, off
);
1571 /* If base is array, first check whether we will be able to move the
1572 reference out of the loop (in order to take its address in strength
1573 reduction). In order for this to work we need both lower bound
1574 and step to be loop invariants. */
1575 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
1577 /* Moreover, for a range, the size needs to be invariant as well. */
1578 if (TREE_CODE (base
) == ARRAY_RANGE_REF
1579 && !expr_invariant_in_loop_p (loop
, TYPE_SIZE (TREE_TYPE (base
))))
1582 step
= array_ref_element_size (base
);
1583 lbound
= array_ref_low_bound (base
);
1585 if (!expr_invariant_in_loop_p (loop
, step
)
1586 || !expr_invariant_in_loop_p (loop
, lbound
))
1590 if (TREE_CODE (*idx
) != SSA_NAME
)
1593 iv
= get_iv (dta
->ivopts_data
, *idx
);
1597 /* XXX We produce for a base of *D42 with iv->base being &x[0]
1598 *&x[0], which is not folded and does not trigger the
1599 ARRAY_REF path below. */
1602 if (integer_zerop (iv
->step
))
1605 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
1607 step
= array_ref_element_size (base
);
1609 /* We only handle addresses whose step is an integer constant. */
1610 if (TREE_CODE (step
) != INTEGER_CST
)
1614 /* The step for pointer arithmetics already is 1 byte. */
1615 step
= size_one_node
;
1619 if (!convert_affine_scev (dta
->ivopts_data
->current_loop
,
1620 sizetype
, &iv_base
, &iv_step
, dta
->stmt
,
1623 /* The index might wrap. */
1627 step
= fold_build2 (MULT_EXPR
, sizetype
, step
, iv_step
);
1628 dta
->step
= fold_build2 (PLUS_EXPR
, sizetype
, dta
->step
, step
);
1633 /* Records use in index IDX. Callback for for_each_index. Ivopts data
1634 object is passed to it in DATA. */
1637 idx_record_use (tree base
, tree
*idx
,
1640 struct ivopts_data
*data
= (struct ivopts_data
*) vdata
;
1641 find_interesting_uses_op (data
, *idx
);
1642 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
1644 find_interesting_uses_op (data
, array_ref_element_size (base
));
1645 find_interesting_uses_op (data
, array_ref_low_bound (base
));
1650 /* If we can prove that TOP = cst * BOT for some constant cst,
1651 store cst to MUL and return true. Otherwise return false.
1652 The returned value is always sign-extended, regardless of the
1653 signedness of TOP and BOT. */
1656 constant_multiple_of (tree top
, tree bot
, widest_int
*mul
)
1659 enum tree_code code
;
1660 unsigned precision
= TYPE_PRECISION (TREE_TYPE (top
));
1661 widest_int res
, p0
, p1
;
1666 if (operand_equal_p (top
, bot
, 0))
1672 code
= TREE_CODE (top
);
1676 mby
= TREE_OPERAND (top
, 1);
1677 if (TREE_CODE (mby
) != INTEGER_CST
)
1680 if (!constant_multiple_of (TREE_OPERAND (top
, 0), bot
, &res
))
1683 *mul
= wi::sext (res
* wi::to_widest (mby
), precision
);
1688 if (!constant_multiple_of (TREE_OPERAND (top
, 0), bot
, &p0
)
1689 || !constant_multiple_of (TREE_OPERAND (top
, 1), bot
, &p1
))
1692 if (code
== MINUS_EXPR
)
1694 *mul
= wi::sext (p0
+ p1
, precision
);
1698 if (TREE_CODE (bot
) != INTEGER_CST
)
1701 p0
= widest_int::from (top
, SIGNED
);
1702 p1
= widest_int::from (bot
, SIGNED
);
1705 *mul
= wi::sext (wi::divmod_trunc (p0
, p1
, SIGNED
, &res
), precision
);
1713 /* Return true if memory reference REF with step STEP may be unaligned. */
1716 may_be_unaligned_p (tree ref
, tree step
)
1718 /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
1719 thus they are not misaligned. */
1720 if (TREE_CODE (ref
) == TARGET_MEM_REF
)
1723 unsigned int align
= TYPE_ALIGN (TREE_TYPE (ref
));
1724 if (GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref
))) > align
)
1725 align
= GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref
)));
1727 unsigned HOST_WIDE_INT bitpos
;
1728 unsigned int ref_align
;
1729 get_object_alignment_1 (ref
, &ref_align
, &bitpos
);
1730 if (ref_align
< align
1731 || (bitpos
% align
) != 0
1732 || (bitpos
% BITS_PER_UNIT
) != 0)
1735 unsigned int trailing_zeros
= tree_ctz (step
);
1736 if (trailing_zeros
< HOST_BITS_PER_INT
1737 && (1U << trailing_zeros
) * BITS_PER_UNIT
< align
)
1743 /* Return true if EXPR may be non-addressable. */
1746 may_be_nonaddressable_p (tree expr
)
1748 switch (TREE_CODE (expr
))
1750 case TARGET_MEM_REF
:
1751 /* TARGET_MEM_REFs are translated directly to valid MEMs on the
1752 target, thus they are always addressable. */
1756 return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr
, 1))
1757 || may_be_nonaddressable_p (TREE_OPERAND (expr
, 0));
1759 case VIEW_CONVERT_EXPR
:
1760 /* This kind of view-conversions may wrap non-addressable objects
1761 and make them look addressable. After some processing the
1762 non-addressability may be uncovered again, causing ADDR_EXPRs
1763 of inappropriate objects to be built. */
1764 if (is_gimple_reg (TREE_OPERAND (expr
, 0))
1765 || !is_gimple_addressable (TREE_OPERAND (expr
, 0)))
1768 /* ... fall through ... */
1771 case ARRAY_RANGE_REF
:
1772 return may_be_nonaddressable_p (TREE_OPERAND (expr
, 0));
1784 /* Finds addresses in *OP_P inside STMT. */
1787 find_interesting_uses_address (struct ivopts_data
*data
, gimple stmt
, tree
*op_p
)
1789 tree base
= *op_p
, step
= size_zero_node
;
1791 struct ifs_ivopts_data ifs_ivopts_data
;
1793 /* Do not play with volatile memory references. A bit too conservative,
1794 perhaps, but safe. */
1795 if (gimple_has_volatile_ops (stmt
))
1798 /* Ignore bitfields for now. Not really something terribly complicated
1800 if (TREE_CODE (base
) == BIT_FIELD_REF
)
1803 base
= unshare_expr (base
);
1805 if (TREE_CODE (base
) == TARGET_MEM_REF
)
1807 tree type
= build_pointer_type (TREE_TYPE (base
));
1811 && TREE_CODE (TMR_BASE (base
)) == SSA_NAME
)
1813 civ
= get_iv (data
, TMR_BASE (base
));
1817 TMR_BASE (base
) = civ
->base
;
1820 if (TMR_INDEX2 (base
)
1821 && TREE_CODE (TMR_INDEX2 (base
)) == SSA_NAME
)
1823 civ
= get_iv (data
, TMR_INDEX2 (base
));
1827 TMR_INDEX2 (base
) = civ
->base
;
1830 if (TMR_INDEX (base
)
1831 && TREE_CODE (TMR_INDEX (base
)) == SSA_NAME
)
1833 civ
= get_iv (data
, TMR_INDEX (base
));
1837 TMR_INDEX (base
) = civ
->base
;
1842 if (TMR_STEP (base
))
1843 astep
= fold_build2 (MULT_EXPR
, type
, TMR_STEP (base
), astep
);
1845 step
= fold_build2 (PLUS_EXPR
, type
, step
, astep
);
1849 if (integer_zerop (step
))
1851 base
= tree_mem_ref_addr (type
, base
);
1855 ifs_ivopts_data
.ivopts_data
= data
;
1856 ifs_ivopts_data
.stmt
= stmt
;
1857 ifs_ivopts_data
.step
= size_zero_node
;
1858 if (!for_each_index (&base
, idx_find_step
, &ifs_ivopts_data
)
1859 || integer_zerop (ifs_ivopts_data
.step
))
1861 step
= ifs_ivopts_data
.step
;
1863 /* Check that the base expression is addressable. This needs
1864 to be done after substituting bases of IVs into it. */
1865 if (may_be_nonaddressable_p (base
))
1868 /* Moreover, on strict alignment platforms, check that it is
1869 sufficiently aligned. */
1870 if (STRICT_ALIGNMENT
&& may_be_unaligned_p (base
, step
))
1873 base
= build_fold_addr_expr (base
);
1875 /* Substituting bases of IVs into the base expression might
1876 have caused folding opportunities. */
1877 if (TREE_CODE (base
) == ADDR_EXPR
)
1879 tree
*ref
= &TREE_OPERAND (base
, 0);
1880 while (handled_component_p (*ref
))
1881 ref
= &TREE_OPERAND (*ref
, 0);
1882 if (TREE_CODE (*ref
) == MEM_REF
)
1884 tree tem
= fold_binary (MEM_REF
, TREE_TYPE (*ref
),
1885 TREE_OPERAND (*ref
, 0),
1886 TREE_OPERAND (*ref
, 1));
1893 civ
= alloc_iv (base
, step
);
1894 record_use (data
, op_p
, civ
, stmt
, USE_ADDRESS
);
1898 for_each_index (op_p
, idx_record_use
, data
);
1901 /* Finds and records invariants used in STMT. */
1904 find_invariants_stmt (struct ivopts_data
*data
, gimple stmt
)
1907 use_operand_p use_p
;
1910 FOR_EACH_PHI_OR_STMT_USE (use_p
, stmt
, iter
, SSA_OP_USE
)
1912 op
= USE_FROM_PTR (use_p
);
1913 record_invariant (data
, op
, false);
1917 /* Finds interesting uses of induction variables in the statement STMT. */
1920 find_interesting_uses_stmt (struct ivopts_data
*data
, gimple stmt
)
1923 tree op
, *lhs
, *rhs
;
1925 use_operand_p use_p
;
1926 enum tree_code code
;
1928 find_invariants_stmt (data
, stmt
);
1930 if (gimple_code (stmt
) == GIMPLE_COND
)
1932 find_interesting_uses_cond (data
, stmt
);
1936 if (is_gimple_assign (stmt
))
1938 lhs
= gimple_assign_lhs_ptr (stmt
);
1939 rhs
= gimple_assign_rhs1_ptr (stmt
);
1941 if (TREE_CODE (*lhs
) == SSA_NAME
)
1943 /* If the statement defines an induction variable, the uses are not
1944 interesting by themselves. */
1946 iv
= get_iv (data
, *lhs
);
1948 if (iv
&& !integer_zerop (iv
->step
))
1952 code
= gimple_assign_rhs_code (stmt
);
1953 if (get_gimple_rhs_class (code
) == GIMPLE_SINGLE_RHS
1954 && (REFERENCE_CLASS_P (*rhs
)
1955 || is_gimple_val (*rhs
)))
1957 if (REFERENCE_CLASS_P (*rhs
))
1958 find_interesting_uses_address (data
, stmt
, rhs
);
1960 find_interesting_uses_op (data
, *rhs
);
1962 if (REFERENCE_CLASS_P (*lhs
))
1963 find_interesting_uses_address (data
, stmt
, lhs
);
1966 else if (TREE_CODE_CLASS (code
) == tcc_comparison
)
1968 find_interesting_uses_cond (data
, stmt
);
1972 /* TODO -- we should also handle address uses of type
1974 memory = call (whatever);
1981 if (gimple_code (stmt
) == GIMPLE_PHI
1982 && gimple_bb (stmt
) == data
->current_loop
->header
)
1984 iv
= get_iv (data
, PHI_RESULT (stmt
));
1986 if (iv
&& !integer_zerop (iv
->step
))
1990 FOR_EACH_PHI_OR_STMT_USE (use_p
, stmt
, iter
, SSA_OP_USE
)
1992 op
= USE_FROM_PTR (use_p
);
1994 if (TREE_CODE (op
) != SSA_NAME
)
1997 iv
= get_iv (data
, op
);
2001 find_interesting_uses_op (data
, op
);
2005 /* Finds interesting uses of induction variables outside of loops
2006 on loop exit edge EXIT. */
2009 find_interesting_uses_outside (struct ivopts_data
*data
, edge exit
)
2015 for (psi
= gsi_start_phis (exit
->dest
); !gsi_end_p (psi
); gsi_next (&psi
))
2018 def
= PHI_ARG_DEF_FROM_EDGE (phi
, exit
);
2019 if (!virtual_operand_p (def
))
2020 find_interesting_uses_op (data
, def
);
2024 /* Finds uses of the induction variables that are interesting. */
2027 find_interesting_uses (struct ivopts_data
*data
)
2030 gimple_stmt_iterator bsi
;
2031 basic_block
*body
= get_loop_body (data
->current_loop
);
2033 struct version_info
*info
;
2036 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2037 fprintf (dump_file
, "Uses:\n\n");
2039 for (i
= 0; i
< data
->current_loop
->num_nodes
; i
++)
2044 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2045 if (e
->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
2046 && !flow_bb_inside_loop_p (data
->current_loop
, e
->dest
))
2047 find_interesting_uses_outside (data
, e
);
2049 for (bsi
= gsi_start_phis (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
2050 find_interesting_uses_stmt (data
, gsi_stmt (bsi
));
2051 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
2052 if (!is_gimple_debug (gsi_stmt (bsi
)))
2053 find_interesting_uses_stmt (data
, gsi_stmt (bsi
));
2056 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2060 fprintf (dump_file
, "\n");
2062 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
2064 info
= ver_info (data
, i
);
2067 fprintf (dump_file
, " ");
2068 print_generic_expr (dump_file
, info
->name
, TDF_SLIM
);
2069 fprintf (dump_file
, " is invariant (%d)%s\n",
2070 info
->inv_id
, info
->has_nonlin_use
? "" : ", eliminable");
2074 fprintf (dump_file
, "\n");
2080 /* Strips constant offsets from EXPR and stores them to OFFSET. If INSIDE_ADDR
2081 is true, assume we are inside an address. If TOP_COMPREF is true, assume
2082 we are at the top-level of the processed address. */
2085 strip_offset_1 (tree expr
, bool inside_addr
, bool top_compref
,
2086 HOST_WIDE_INT
*offset
)
2088 tree op0
= NULL_TREE
, op1
= NULL_TREE
, tmp
, step
;
2089 enum tree_code code
;
2090 tree type
, orig_type
= TREE_TYPE (expr
);
2091 HOST_WIDE_INT off0
, off1
, st
;
2092 tree orig_expr
= expr
;
2096 type
= TREE_TYPE (expr
);
2097 code
= TREE_CODE (expr
);
2103 if (!cst_and_fits_in_hwi (expr
)
2104 || integer_zerop (expr
))
2107 *offset
= int_cst_value (expr
);
2108 return build_int_cst (orig_type
, 0);
2110 case POINTER_PLUS_EXPR
:
2113 op0
= TREE_OPERAND (expr
, 0);
2114 op1
= TREE_OPERAND (expr
, 1);
2116 op0
= strip_offset_1 (op0
, false, false, &off0
);
2117 op1
= strip_offset_1 (op1
, false, false, &off1
);
2119 *offset
= (code
== MINUS_EXPR
? off0
- off1
: off0
+ off1
);
2120 if (op0
== TREE_OPERAND (expr
, 0)
2121 && op1
== TREE_OPERAND (expr
, 1))
2124 if (integer_zerop (op1
))
2126 else if (integer_zerop (op0
))
2128 if (code
== MINUS_EXPR
)
2129 expr
= fold_build1 (NEGATE_EXPR
, type
, op1
);
2134 expr
= fold_build2 (code
, type
, op0
, op1
);
2136 return fold_convert (orig_type
, expr
);
2139 op1
= TREE_OPERAND (expr
, 1);
2140 if (!cst_and_fits_in_hwi (op1
))
2143 op0
= TREE_OPERAND (expr
, 0);
2144 op0
= strip_offset_1 (op0
, false, false, &off0
);
2145 if (op0
== TREE_OPERAND (expr
, 0))
2148 *offset
= off0
* int_cst_value (op1
);
2149 if (integer_zerop (op0
))
2152 expr
= fold_build2 (MULT_EXPR
, type
, op0
, op1
);
2154 return fold_convert (orig_type
, expr
);
2157 case ARRAY_RANGE_REF
:
2161 step
= array_ref_element_size (expr
);
2162 if (!cst_and_fits_in_hwi (step
))
2165 st
= int_cst_value (step
);
2166 op1
= TREE_OPERAND (expr
, 1);
2167 op1
= strip_offset_1 (op1
, false, false, &off1
);
2168 *offset
= off1
* st
;
2171 && integer_zerop (op1
))
2173 /* Strip the component reference completely. */
2174 op0
= TREE_OPERAND (expr
, 0);
2175 op0
= strip_offset_1 (op0
, inside_addr
, top_compref
, &off0
);
2188 tmp
= component_ref_field_offset (expr
);
2189 field
= TREE_OPERAND (expr
, 1);
2191 && cst_and_fits_in_hwi (tmp
)
2192 && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field
)))
2194 HOST_WIDE_INT boffset
, abs_off
;
2196 /* Strip the component reference completely. */
2197 op0
= TREE_OPERAND (expr
, 0);
2198 op0
= strip_offset_1 (op0
, inside_addr
, top_compref
, &off0
);
2199 boffset
= int_cst_value (DECL_FIELD_BIT_OFFSET (field
));
2200 abs_off
= abs_hwi (boffset
) / BITS_PER_UNIT
;
2204 *offset
= off0
+ int_cst_value (tmp
) + abs_off
;
2211 op0
= TREE_OPERAND (expr
, 0);
2212 op0
= strip_offset_1 (op0
, true, true, &off0
);
2215 if (op0
== TREE_OPERAND (expr
, 0))
2218 expr
= build_fold_addr_expr (op0
);
2219 return fold_convert (orig_type
, expr
);
2222 /* ??? Offset operand? */
2223 inside_addr
= false;
2230 /* Default handling of expressions for that we want to recurse into
2231 the first operand. */
2232 op0
= TREE_OPERAND (expr
, 0);
2233 op0
= strip_offset_1 (op0
, inside_addr
, false, &off0
);
2236 if (op0
== TREE_OPERAND (expr
, 0)
2237 && (!op1
|| op1
== TREE_OPERAND (expr
, 1)))
2240 expr
= copy_node (expr
);
2241 TREE_OPERAND (expr
, 0) = op0
;
2243 TREE_OPERAND (expr
, 1) = op1
;
2245 /* Inside address, we might strip the top level component references,
2246 thus changing type of the expression. Handling of ADDR_EXPR
2248 expr
= fold_convert (orig_type
, expr
);
2253 /* Strips constant offsets from EXPR and stores them to OFFSET. */
2256 strip_offset (tree expr
, unsigned HOST_WIDE_INT
*offset
)
2259 tree core
= strip_offset_1 (expr
, false, false, &off
);
2264 /* Returns variant of TYPE that can be used as base for different uses.
2265 We return unsigned type with the same precision, which avoids problems
2269 generic_type_for (tree type
)
2271 if (POINTER_TYPE_P (type
))
2272 return unsigned_type_for (type
);
2274 if (TYPE_UNSIGNED (type
))
2277 return unsigned_type_for (type
);
2280 /* Records invariants in *EXPR_P. Callback for walk_tree. DATA contains
2281 the bitmap to that we should store it. */
2283 static struct ivopts_data
*fd_ivopts_data
;
2285 find_depends (tree
*expr_p
, int *ws ATTRIBUTE_UNUSED
, void *data
)
2287 bitmap
*depends_on
= (bitmap
*) data
;
2288 struct version_info
*info
;
2290 if (TREE_CODE (*expr_p
) != SSA_NAME
)
2292 info
= name_info (fd_ivopts_data
, *expr_p
);
2294 if (!info
->inv_id
|| info
->has_nonlin_use
)
2298 *depends_on
= BITMAP_ALLOC (NULL
);
2299 bitmap_set_bit (*depends_on
, info
->inv_id
);
2304 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
2305 position to POS. If USE is not NULL, the candidate is set as related to
2306 it. If both BASE and STEP are NULL, we add a pseudocandidate for the
2307 replacement of the final value of the iv by a direct computation. */
2309 static struct iv_cand
*
2310 add_candidate_1 (struct ivopts_data
*data
,
2311 tree base
, tree step
, bool important
, enum iv_position pos
,
2312 struct iv_use
*use
, gimple incremented_at
)
2315 struct iv_cand
*cand
= NULL
;
2316 tree type
, orig_type
;
2318 /* For non-original variables, make sure their values are computed in a type
2319 that does not invoke undefined behavior on overflows (since in general,
2320 we cannot prove that these induction variables are non-wrapping). */
2321 if (pos
!= IP_ORIGINAL
)
2323 orig_type
= TREE_TYPE (base
);
2324 type
= generic_type_for (orig_type
);
2325 if (type
!= orig_type
)
2327 base
= fold_convert (type
, base
);
2328 step
= fold_convert (type
, step
);
2332 for (i
= 0; i
< n_iv_cands (data
); i
++)
2334 cand
= iv_cand (data
, i
);
2336 if (cand
->pos
!= pos
)
2339 if (cand
->incremented_at
!= incremented_at
2340 || ((pos
== IP_AFTER_USE
|| pos
== IP_BEFORE_USE
)
2341 && cand
->ainc_use
!= use
))
2355 if (operand_equal_p (base
, cand
->iv
->base
, 0)
2356 && operand_equal_p (step
, cand
->iv
->step
, 0)
2357 && (TYPE_PRECISION (TREE_TYPE (base
))
2358 == TYPE_PRECISION (TREE_TYPE (cand
->iv
->base
))))
2362 if (i
== n_iv_cands (data
))
2364 cand
= XCNEW (struct iv_cand
);
2370 cand
->iv
= alloc_iv (base
, step
);
2373 if (pos
!= IP_ORIGINAL
&& cand
->iv
)
2375 cand
->var_before
= create_tmp_var_raw (TREE_TYPE (base
), "ivtmp");
2376 cand
->var_after
= cand
->var_before
;
2378 cand
->important
= important
;
2379 cand
->incremented_at
= incremented_at
;
2380 data
->iv_candidates
.safe_push (cand
);
2383 && TREE_CODE (step
) != INTEGER_CST
)
2385 fd_ivopts_data
= data
;
2386 walk_tree (&step
, find_depends
, &cand
->depends_on
, NULL
);
2389 if (pos
== IP_AFTER_USE
|| pos
== IP_BEFORE_USE
)
2390 cand
->ainc_use
= use
;
2392 cand
->ainc_use
= NULL
;
2394 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2395 dump_cand (dump_file
, cand
);
2398 if (important
&& !cand
->important
)
2400 cand
->important
= true;
2401 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2402 fprintf (dump_file
, "Candidate %d is important\n", cand
->id
);
2407 bitmap_set_bit (use
->related_cands
, i
);
2408 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2409 fprintf (dump_file
, "Candidate %d is related to use %d\n",
2416 /* Returns true if incrementing the induction variable at the end of the LOOP
2419 The purpose is to avoid splitting latch edge with a biv increment, thus
2420 creating a jump, possibly confusing other optimization passes and leaving
2421 less freedom to scheduler. So we allow IP_END_POS only if IP_NORMAL_POS
2422 is not available (so we do not have a better alternative), or if the latch
2423 edge is already nonempty. */
2426 allow_ip_end_pos_p (struct loop
*loop
)
2428 if (!ip_normal_pos (loop
))
2431 if (!empty_block_p (ip_end_pos (loop
)))
2437 /* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
2438 Important field is set to IMPORTANT. */
2441 add_autoinc_candidates (struct ivopts_data
*data
, tree base
, tree step
,
2442 bool important
, struct iv_use
*use
)
2444 basic_block use_bb
= gimple_bb (use
->stmt
);
2445 machine_mode mem_mode
;
2446 unsigned HOST_WIDE_INT cstepi
;
2448 /* If we insert the increment in any position other than the standard
2449 ones, we must ensure that it is incremented once per iteration.
2450 It must not be in an inner nested loop, or one side of an if
2452 if (use_bb
->loop_father
!= data
->current_loop
2453 || !dominated_by_p (CDI_DOMINATORS
, data
->current_loop
->latch
, use_bb
)
2454 || stmt_could_throw_p (use
->stmt
)
2455 || !cst_and_fits_in_hwi (step
))
2458 cstepi
= int_cst_value (step
);
2460 mem_mode
= TYPE_MODE (TREE_TYPE (*use
->op_p
));
2461 if (((USE_LOAD_PRE_INCREMENT (mem_mode
)
2462 || USE_STORE_PRE_INCREMENT (mem_mode
))
2463 && GET_MODE_SIZE (mem_mode
) == cstepi
)
2464 || ((USE_LOAD_PRE_DECREMENT (mem_mode
)
2465 || USE_STORE_PRE_DECREMENT (mem_mode
))
2466 && GET_MODE_SIZE (mem_mode
) == -cstepi
))
2468 enum tree_code code
= MINUS_EXPR
;
2470 tree new_step
= step
;
2472 if (POINTER_TYPE_P (TREE_TYPE (base
)))
2474 new_step
= fold_build1 (NEGATE_EXPR
, TREE_TYPE (step
), step
);
2475 code
= POINTER_PLUS_EXPR
;
2478 new_step
= fold_convert (TREE_TYPE (base
), new_step
);
2479 new_base
= fold_build2 (code
, TREE_TYPE (base
), base
, new_step
);
2480 add_candidate_1 (data
, new_base
, step
, important
, IP_BEFORE_USE
, use
,
2483 if (((USE_LOAD_POST_INCREMENT (mem_mode
)
2484 || USE_STORE_POST_INCREMENT (mem_mode
))
2485 && GET_MODE_SIZE (mem_mode
) == cstepi
)
2486 || ((USE_LOAD_POST_DECREMENT (mem_mode
)
2487 || USE_STORE_POST_DECREMENT (mem_mode
))
2488 && GET_MODE_SIZE (mem_mode
) == -cstepi
))
2490 add_candidate_1 (data
, base
, step
, important
, IP_AFTER_USE
, use
,
2495 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
2496 position to POS. If USE is not NULL, the candidate is set as related to
2497 it. The candidate computation is scheduled on all available positions. */
2500 add_candidate (struct ivopts_data
*data
,
2501 tree base
, tree step
, bool important
, struct iv_use
*use
)
2503 if (ip_normal_pos (data
->current_loop
))
2504 add_candidate_1 (data
, base
, step
, important
, IP_NORMAL
, use
, NULL
);
2505 if (ip_end_pos (data
->current_loop
)
2506 && allow_ip_end_pos_p (data
->current_loop
))
2507 add_candidate_1 (data
, base
, step
, important
, IP_END
, use
, NULL
);
2509 if (use
!= NULL
&& use
->type
== USE_ADDRESS
)
2510 add_autoinc_candidates (data
, base
, step
, important
, use
);
2513 /* Adds standard iv candidates. */
2516 add_standard_iv_candidates (struct ivopts_data
*data
)
2518 add_candidate (data
, integer_zero_node
, integer_one_node
, true, NULL
);
2520 /* The same for a double-integer type if it is still fast enough. */
2522 (long_integer_type_node
) > TYPE_PRECISION (integer_type_node
)
2523 && TYPE_PRECISION (long_integer_type_node
) <= BITS_PER_WORD
)
2524 add_candidate (data
, build_int_cst (long_integer_type_node
, 0),
2525 build_int_cst (long_integer_type_node
, 1), true, NULL
);
2527 /* The same for a double-integer type if it is still fast enough. */
2529 (long_long_integer_type_node
) > TYPE_PRECISION (long_integer_type_node
)
2530 && TYPE_PRECISION (long_long_integer_type_node
) <= BITS_PER_WORD
)
2531 add_candidate (data
, build_int_cst (long_long_integer_type_node
, 0),
2532 build_int_cst (long_long_integer_type_node
, 1), true, NULL
);
2536 /* Adds candidates bases on the old induction variable IV. */
2539 add_old_iv_candidates (struct ivopts_data
*data
, struct iv
*iv
)
2543 struct iv_cand
*cand
;
2545 add_candidate (data
, iv
->base
, iv
->step
, true, NULL
);
2547 /* The same, but with initial value zero. */
2548 if (POINTER_TYPE_P (TREE_TYPE (iv
->base
)))
2549 add_candidate (data
, size_int (0), iv
->step
, true, NULL
);
2551 add_candidate (data
, build_int_cst (TREE_TYPE (iv
->base
), 0),
2552 iv
->step
, true, NULL
);
2554 phi
= SSA_NAME_DEF_STMT (iv
->ssa_name
);
2555 if (gimple_code (phi
) == GIMPLE_PHI
)
2557 /* Additionally record the possibility of leaving the original iv
2559 def
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_latch_edge (data
->current_loop
));
2560 /* Don't add candidate if it's from another PHI node because
2561 it's an affine iv appearing in the form of PEELED_CHREC. */
2562 phi
= SSA_NAME_DEF_STMT (def
);
2563 if (gimple_code (phi
) != GIMPLE_PHI
)
2565 cand
= add_candidate_1 (data
,
2566 iv
->base
, iv
->step
, true, IP_ORIGINAL
, NULL
,
2567 SSA_NAME_DEF_STMT (def
));
2568 cand
->var_before
= iv
->ssa_name
;
2569 cand
->var_after
= def
;
2572 gcc_assert (gimple_bb (phi
) == data
->current_loop
->header
);
2576 /* Adds candidates based on the old induction variables. */
2579 add_old_ivs_candidates (struct ivopts_data
*data
)
2585 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
2587 iv
= ver_info (data
, i
)->iv
;
2588 if (iv
&& iv
->biv_p
&& !integer_zerop (iv
->step
))
2589 add_old_iv_candidates (data
, iv
);
2593 /* Adds candidates based on the value of the induction variable IV and USE. */
2596 add_iv_value_candidates (struct ivopts_data
*data
,
2597 struct iv
*iv
, struct iv_use
*use
)
2599 unsigned HOST_WIDE_INT offset
;
2603 add_candidate (data
, iv
->base
, iv
->step
, false, use
);
2605 /* The same, but with initial value zero. Make such variable important,
2606 since it is generic enough so that possibly many uses may be based
2608 basetype
= TREE_TYPE (iv
->base
);
2609 if (POINTER_TYPE_P (basetype
))
2610 basetype
= sizetype
;
2611 add_candidate (data
, build_int_cst (basetype
, 0),
2612 iv
->step
, true, use
);
2614 /* Third, try removing the constant offset. Make sure to even
2615 add a candidate for &a[0] vs. (T *)&a. */
2616 base
= strip_offset (iv
->base
, &offset
);
2618 || base
!= iv
->base
)
2619 add_candidate (data
, base
, iv
->step
, false, use
);
2622 /* Adds candidates based on the uses. */
2625 add_derived_ivs_candidates (struct ivopts_data
*data
)
2629 for (i
= 0; i
< n_iv_uses (data
); i
++)
2631 struct iv_use
*use
= iv_use (data
, i
);
2638 case USE_NONLINEAR_EXPR
:
2641 /* Just add the ivs based on the value of the iv used here. */
2642 add_iv_value_candidates (data
, use
->iv
, use
);
2651 /* Record important candidates and add them to related_cands bitmaps
2655 record_important_candidates (struct ivopts_data
*data
)
2660 for (i
= 0; i
< n_iv_cands (data
); i
++)
2662 struct iv_cand
*cand
= iv_cand (data
, i
);
2664 if (cand
->important
)
2665 bitmap_set_bit (data
->important_candidates
, i
);
2668 data
->consider_all_candidates
= (n_iv_cands (data
)
2669 <= CONSIDER_ALL_CANDIDATES_BOUND
);
2671 if (data
->consider_all_candidates
)
2673 /* We will not need "related_cands" bitmaps in this case,
2674 so release them to decrease peak memory consumption. */
2675 for (i
= 0; i
< n_iv_uses (data
); i
++)
2677 use
= iv_use (data
, i
);
2678 BITMAP_FREE (use
->related_cands
);
2683 /* Add important candidates to the related_cands bitmaps. */
2684 for (i
= 0; i
< n_iv_uses (data
); i
++)
2685 bitmap_ior_into (iv_use (data
, i
)->related_cands
,
2686 data
->important_candidates
);
2690 /* Allocates the data structure mapping the (use, candidate) pairs to costs.
2691 If consider_all_candidates is true, we use a two-dimensional array, otherwise
2692 we allocate a simple list to every use. */
2695 alloc_use_cost_map (struct ivopts_data
*data
)
2697 unsigned i
, size
, s
;
2699 for (i
= 0; i
< n_iv_uses (data
); i
++)
2701 struct iv_use
*use
= iv_use (data
, i
);
2703 if (data
->consider_all_candidates
)
2704 size
= n_iv_cands (data
);
2707 s
= bitmap_count_bits (use
->related_cands
);
2709 /* Round up to the power of two, so that moduling by it is fast. */
2710 size
= s
? (1 << ceil_log2 (s
)) : 1;
2713 use
->n_map_members
= size
;
2714 use
->cost_map
= XCNEWVEC (struct cost_pair
, size
);
2718 /* Returns description of computation cost of expression whose runtime
2719 cost is RUNTIME and complexity corresponds to COMPLEXITY. */
2722 new_cost (unsigned runtime
, unsigned complexity
)
2726 cost
.cost
= runtime
;
2727 cost
.complexity
= complexity
;
2732 /* Adds costs COST1 and COST2. */
2735 add_costs (comp_cost cost1
, comp_cost cost2
)
2737 cost1
.cost
+= cost2
.cost
;
2738 cost1
.complexity
+= cost2
.complexity
;
2742 /* Subtracts costs COST1 and COST2. */
2745 sub_costs (comp_cost cost1
, comp_cost cost2
)
2747 cost1
.cost
-= cost2
.cost
;
2748 cost1
.complexity
-= cost2
.complexity
;
2753 /* Returns a negative number if COST1 < COST2, a positive number if
2754 COST1 > COST2, and 0 if COST1 = COST2. */
2757 compare_costs (comp_cost cost1
, comp_cost cost2
)
2759 if (cost1
.cost
== cost2
.cost
)
2760 return cost1
.complexity
- cost2
.complexity
;
2762 return cost1
.cost
- cost2
.cost
;
2765 /* Returns true if COST is infinite. */
2768 infinite_cost_p (comp_cost cost
)
2770 return cost
.cost
== INFTY
;
2773 /* Sets cost of (USE, CANDIDATE) pair to COST and record that it depends
2774 on invariants DEPENDS_ON and that the value used in expressing it
2775 is VALUE, and in case of iv elimination the comparison operator is COMP. */
2778 set_use_iv_cost (struct ivopts_data
*data
,
2779 struct iv_use
*use
, struct iv_cand
*cand
,
2780 comp_cost cost
, bitmap depends_on
, tree value
,
2781 enum tree_code comp
, int inv_expr_id
)
2785 if (infinite_cost_p (cost
))
2787 BITMAP_FREE (depends_on
);
2791 if (data
->consider_all_candidates
)
2793 use
->cost_map
[cand
->id
].cand
= cand
;
2794 use
->cost_map
[cand
->id
].cost
= cost
;
2795 use
->cost_map
[cand
->id
].depends_on
= depends_on
;
2796 use
->cost_map
[cand
->id
].value
= value
;
2797 use
->cost_map
[cand
->id
].comp
= comp
;
2798 use
->cost_map
[cand
->id
].inv_expr_id
= inv_expr_id
;
2802 /* n_map_members is a power of two, so this computes modulo. */
2803 s
= cand
->id
& (use
->n_map_members
- 1);
2804 for (i
= s
; i
< use
->n_map_members
; i
++)
2805 if (!use
->cost_map
[i
].cand
)
2807 for (i
= 0; i
< s
; i
++)
2808 if (!use
->cost_map
[i
].cand
)
2814 use
->cost_map
[i
].cand
= cand
;
2815 use
->cost_map
[i
].cost
= cost
;
2816 use
->cost_map
[i
].depends_on
= depends_on
;
2817 use
->cost_map
[i
].value
= value
;
2818 use
->cost_map
[i
].comp
= comp
;
2819 use
->cost_map
[i
].inv_expr_id
= inv_expr_id
;
2822 /* Gets cost of (USE, CANDIDATE) pair. */
2824 static struct cost_pair
*
2825 get_use_iv_cost (struct ivopts_data
*data
, struct iv_use
*use
,
2826 struct iv_cand
*cand
)
2829 struct cost_pair
*ret
;
2834 if (data
->consider_all_candidates
)
2836 ret
= use
->cost_map
+ cand
->id
;
2843 /* n_map_members is a power of two, so this computes modulo. */
2844 s
= cand
->id
& (use
->n_map_members
- 1);
2845 for (i
= s
; i
< use
->n_map_members
; i
++)
2846 if (use
->cost_map
[i
].cand
== cand
)
2847 return use
->cost_map
+ i
;
2848 else if (use
->cost_map
[i
].cand
== NULL
)
2850 for (i
= 0; i
< s
; i
++)
2851 if (use
->cost_map
[i
].cand
== cand
)
2852 return use
->cost_map
+ i
;
2853 else if (use
->cost_map
[i
].cand
== NULL
)
2859 /* Produce DECL_RTL for object obj so it looks like it is stored in memory. */
2861 produce_memory_decl_rtl (tree obj
, int *regno
)
2863 addr_space_t as
= TYPE_ADDR_SPACE (TREE_TYPE (obj
));
2864 machine_mode address_mode
= targetm
.addr_space
.address_mode (as
);
2868 if (TREE_STATIC (obj
) || DECL_EXTERNAL (obj
))
2870 const char *name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj
));
2871 x
= gen_rtx_SYMBOL_REF (address_mode
, name
);
2872 SET_SYMBOL_REF_DECL (x
, obj
);
2873 x
= gen_rtx_MEM (DECL_MODE (obj
), x
);
2874 set_mem_addr_space (x
, as
);
2875 targetm
.encode_section_info (obj
, x
, true);
2879 x
= gen_raw_REG (address_mode
, (*regno
)++);
2880 x
= gen_rtx_MEM (DECL_MODE (obj
), x
);
2881 set_mem_addr_space (x
, as
);
2887 /* Prepares decl_rtl for variables referred in *EXPR_P. Callback for
2888 walk_tree. DATA contains the actual fake register number. */
2891 prepare_decl_rtl (tree
*expr_p
, int *ws
, void *data
)
2893 tree obj
= NULL_TREE
;
2895 int *regno
= (int *) data
;
2897 switch (TREE_CODE (*expr_p
))
2900 for (expr_p
= &TREE_OPERAND (*expr_p
, 0);
2901 handled_component_p (*expr_p
);
2902 expr_p
= &TREE_OPERAND (*expr_p
, 0))
2905 if (DECL_P (obj
) && HAS_RTL_P (obj
) && !DECL_RTL_SET_P (obj
))
2906 x
= produce_memory_decl_rtl (obj
, regno
);
2911 obj
= SSA_NAME_VAR (*expr_p
);
2912 /* Defer handling of anonymous SSA_NAMEs to the expander. */
2915 if (!DECL_RTL_SET_P (obj
))
2916 x
= gen_raw_REG (DECL_MODE (obj
), (*regno
)++);
2925 if (DECL_RTL_SET_P (obj
))
2928 if (DECL_MODE (obj
) == BLKmode
)
2929 x
= produce_memory_decl_rtl (obj
, regno
);
2931 x
= gen_raw_REG (DECL_MODE (obj
), (*regno
)++);
2941 decl_rtl_to_reset
.safe_push (obj
);
2942 SET_DECL_RTL (obj
, x
);
2948 /* Determines cost of the computation of EXPR. */
2951 computation_cost (tree expr
, bool speed
)
2955 tree type
= TREE_TYPE (expr
);
2957 /* Avoid using hard regs in ways which may be unsupported. */
2958 int regno
= LAST_VIRTUAL_REGISTER
+ 1;
2959 struct cgraph_node
*node
= cgraph_node::get (current_function_decl
);
2960 enum node_frequency real_frequency
= node
->frequency
;
2962 node
->frequency
= NODE_FREQUENCY_NORMAL
;
2963 crtl
->maybe_hot_insn_p
= speed
;
2964 walk_tree (&expr
, prepare_decl_rtl
, ®no
, NULL
);
2966 rslt
= expand_expr (expr
, NULL_RTX
, TYPE_MODE (type
), EXPAND_NORMAL
);
2969 default_rtl_profile ();
2970 node
->frequency
= real_frequency
;
2972 cost
= seq_cost (seq
, speed
);
2974 cost
+= address_cost (XEXP (rslt
, 0), TYPE_MODE (type
),
2975 TYPE_ADDR_SPACE (type
), speed
);
2976 else if (!REG_P (rslt
))
2977 cost
+= set_src_cost (rslt
, speed
);
2982 /* Returns variable containing the value of candidate CAND at statement AT. */
2985 var_at_stmt (struct loop
*loop
, struct iv_cand
*cand
, gimple stmt
)
2987 if (stmt_after_increment (loop
, cand
, stmt
))
2988 return cand
->var_after
;
2990 return cand
->var_before
;
2993 /* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
2994 same precision that is at least as wide as the precision of TYPE, stores
2995 BA to A and BB to B, and returns the type of BA. Otherwise, returns the
2999 determine_common_wider_type (tree
*a
, tree
*b
)
3001 tree wider_type
= NULL
;
3003 tree atype
= TREE_TYPE (*a
);
3005 if (CONVERT_EXPR_P (*a
))
3007 suba
= TREE_OPERAND (*a
, 0);
3008 wider_type
= TREE_TYPE (suba
);
3009 if (TYPE_PRECISION (wider_type
) < TYPE_PRECISION (atype
))
3015 if (CONVERT_EXPR_P (*b
))
3017 subb
= TREE_OPERAND (*b
, 0);
3018 if (TYPE_PRECISION (wider_type
) != TYPE_PRECISION (TREE_TYPE (subb
)))
3029 /* Determines the expression by that USE is expressed from induction variable
3030 CAND at statement AT in LOOP. The expression is stored in a decomposed
3031 form into AFF. Returns false if USE cannot be expressed using CAND. */
3034 get_computation_aff (struct loop
*loop
,
3035 struct iv_use
*use
, struct iv_cand
*cand
, gimple at
,
3036 struct aff_tree
*aff
)
3038 tree ubase
= use
->iv
->base
;
3039 tree ustep
= use
->iv
->step
;
3040 tree cbase
= cand
->iv
->base
;
3041 tree cstep
= cand
->iv
->step
, cstep_common
;
3042 tree utype
= TREE_TYPE (ubase
), ctype
= TREE_TYPE (cbase
);
3043 tree common_type
, var
;
3045 aff_tree cbase_aff
, var_aff
;
3048 if (TYPE_PRECISION (utype
) > TYPE_PRECISION (ctype
))
3050 /* We do not have a precision to express the values of use. */
3054 var
= var_at_stmt (loop
, cand
, at
);
3055 uutype
= unsigned_type_for (utype
);
3057 /* If the conversion is not noop, perform it. */
3058 if (TYPE_PRECISION (utype
) < TYPE_PRECISION (ctype
))
3060 cstep
= fold_convert (uutype
, cstep
);
3061 cbase
= fold_convert (uutype
, cbase
);
3062 var
= fold_convert (uutype
, var
);
3065 if (!constant_multiple_of (ustep
, cstep
, &rat
))
3068 /* In case both UBASE and CBASE are shortened to UUTYPE from some common
3069 type, we achieve better folding by computing their difference in this
3070 wider type, and cast the result to UUTYPE. We do not need to worry about
3071 overflows, as all the arithmetics will in the end be performed in UUTYPE
3073 common_type
= determine_common_wider_type (&ubase
, &cbase
);
3075 /* use = ubase - ratio * cbase + ratio * var. */
3076 tree_to_aff_combination (ubase
, common_type
, aff
);
3077 tree_to_aff_combination (cbase
, common_type
, &cbase_aff
);
3078 tree_to_aff_combination (var
, uutype
, &var_aff
);
3080 /* We need to shift the value if we are after the increment. */
3081 if (stmt_after_increment (loop
, cand
, at
))
3085 if (common_type
!= uutype
)
3086 cstep_common
= fold_convert (common_type
, cstep
);
3088 cstep_common
= cstep
;
3090 tree_to_aff_combination (cstep_common
, common_type
, &cstep_aff
);
3091 aff_combination_add (&cbase_aff
, &cstep_aff
);
3094 aff_combination_scale (&cbase_aff
, -rat
);
3095 aff_combination_add (aff
, &cbase_aff
);
3096 if (common_type
!= uutype
)
3097 aff_combination_convert (aff
, uutype
);
3099 aff_combination_scale (&var_aff
, rat
);
3100 aff_combination_add (aff
, &var_aff
);
3105 /* Return the type of USE. */
3108 get_use_type (struct iv_use
*use
)
3110 tree base_type
= TREE_TYPE (use
->iv
->base
);
3113 if (use
->type
== USE_ADDRESS
)
3115 /* The base_type may be a void pointer. Create a pointer type based on
3116 the mem_ref instead. */
3117 type
= build_pointer_type (TREE_TYPE (*use
->op_p
));
3118 gcc_assert (TYPE_ADDR_SPACE (TREE_TYPE (type
))
3119 == TYPE_ADDR_SPACE (TREE_TYPE (base_type
)));
3127 /* Determines the expression by that USE is expressed from induction variable
3128 CAND at statement AT in LOOP. The computation is unshared. */
3131 get_computation_at (struct loop
*loop
,
3132 struct iv_use
*use
, struct iv_cand
*cand
, gimple at
)
3135 tree type
= get_use_type (use
);
3137 if (!get_computation_aff (loop
, use
, cand
, at
, &aff
))
3139 unshare_aff_combination (&aff
);
3140 return fold_convert (type
, aff_combination_to_tree (&aff
));
3143 /* Determines the expression by that USE is expressed from induction variable
3144 CAND in LOOP. The computation is unshared. */
3147 get_computation (struct loop
*loop
, struct iv_use
*use
, struct iv_cand
*cand
)
3149 return get_computation_at (loop
, use
, cand
, use
->stmt
);
3152 /* Adjust the cost COST for being in loop setup rather than loop body.
3153 If we're optimizing for space, the loop setup overhead is constant;
3154 if we're optimizing for speed, amortize it over the per-iteration cost. */
3156 adjust_setup_cost (struct ivopts_data
*data
, unsigned cost
)
3160 else if (optimize_loop_for_speed_p (data
->current_loop
))
3161 return cost
/ avg_loop_niter (data
->current_loop
);
3166 /* Returns true if multiplying by RATIO is allowed in an address. Test the
3167 validity for a memory reference accessing memory of mode MODE in
3168 address space AS. */
3172 multiplier_allowed_in_address_p (HOST_WIDE_INT ratio
, machine_mode mode
,
3175 #define MAX_RATIO 128
3176 unsigned int data_index
= (int) as
* MAX_MACHINE_MODE
+ (int) mode
;
3177 static vec
<sbitmap
> valid_mult_list
;
3180 if (data_index
>= valid_mult_list
.length ())
3181 valid_mult_list
.safe_grow_cleared (data_index
+ 1);
3183 valid_mult
= valid_mult_list
[data_index
];
3186 machine_mode address_mode
= targetm
.addr_space
.address_mode (as
);
3187 rtx reg1
= gen_raw_REG (address_mode
, LAST_VIRTUAL_REGISTER
+ 1);
3188 rtx reg2
= gen_raw_REG (address_mode
, LAST_VIRTUAL_REGISTER
+ 2);
3192 valid_mult
= sbitmap_alloc (2 * MAX_RATIO
+ 1);
3193 bitmap_clear (valid_mult
);
3194 scaled
= gen_rtx_fmt_ee (MULT
, address_mode
, reg1
, NULL_RTX
);
3195 addr
= gen_rtx_fmt_ee (PLUS
, address_mode
, scaled
, reg2
);
3196 for (i
= -MAX_RATIO
; i
<= MAX_RATIO
; i
++)
3198 XEXP (scaled
, 1) = gen_int_mode (i
, address_mode
);
3199 if (memory_address_addr_space_p (mode
, addr
, as
)
3200 || memory_address_addr_space_p (mode
, scaled
, as
))
3201 bitmap_set_bit (valid_mult
, i
+ MAX_RATIO
);
3204 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3206 fprintf (dump_file
, " allowed multipliers:");
3207 for (i
= -MAX_RATIO
; i
<= MAX_RATIO
; i
++)
3208 if (bitmap_bit_p (valid_mult
, i
+ MAX_RATIO
))
3209 fprintf (dump_file
, " %d", (int) i
);
3210 fprintf (dump_file
, "\n");
3211 fprintf (dump_file
, "\n");
3214 valid_mult_list
[data_index
] = valid_mult
;
3217 if (ratio
> MAX_RATIO
|| ratio
< -MAX_RATIO
)
3220 return bitmap_bit_p (valid_mult
, ratio
+ MAX_RATIO
);
3223 /* Returns cost of address in shape symbol + var + OFFSET + RATIO * index.
3224 If SYMBOL_PRESENT is false, symbol is omitted. If VAR_PRESENT is false,
3225 variable is omitted. Compute the cost for a memory reference that accesses
3226 a memory location of mode MEM_MODE in address space AS.
3228 MAY_AUTOINC is set to true if the autoincrement (increasing index by
3229 size of MEM_MODE / RATIO) is available. To make this determination, we
3230 look at the size of the increment to be made, which is given in CSTEP.
3231 CSTEP may be zero if the step is unknown.
3232 STMT_AFTER_INC is true iff the statement we're looking at is after the
3233 increment of the original biv.
3235 TODO -- there must be some better way. This all is quite crude. */
3239 AINC_PRE_INC
, /* Pre increment. */
3240 AINC_PRE_DEC
, /* Pre decrement. */
3241 AINC_POST_INC
, /* Post increment. */
3242 AINC_POST_DEC
, /* Post decrement. */
3243 AINC_NONE
/* Also the number of auto increment types. */
3246 typedef struct address_cost_data_s
3248 HOST_WIDE_INT min_offset
, max_offset
;
3249 unsigned costs
[2][2][2][2];
3250 unsigned ainc_costs
[AINC_NONE
];
3251 } *address_cost_data
;
3255 get_address_cost (bool symbol_present
, bool var_present
,
3256 unsigned HOST_WIDE_INT offset
, HOST_WIDE_INT ratio
,
3257 HOST_WIDE_INT cstep
, machine_mode mem_mode
,
3258 addr_space_t as
, bool speed
,
3259 bool stmt_after_inc
, bool *may_autoinc
)
3261 machine_mode address_mode
= targetm
.addr_space
.address_mode (as
);
3262 static vec
<address_cost_data
> address_cost_data_list
;
3263 unsigned int data_index
= (int) as
* MAX_MACHINE_MODE
+ (int) mem_mode
;
3264 address_cost_data data
;
3265 static bool has_preinc
[MAX_MACHINE_MODE
], has_postinc
[MAX_MACHINE_MODE
];
3266 static bool has_predec
[MAX_MACHINE_MODE
], has_postdec
[MAX_MACHINE_MODE
];
3267 unsigned cost
, acost
, complexity
;
3268 enum ainc_type autoinc_type
;
3269 bool offset_p
, ratio_p
, autoinc
;
3270 HOST_WIDE_INT s_offset
, autoinc_offset
, msize
;
3271 unsigned HOST_WIDE_INT mask
;
3274 if (data_index
>= address_cost_data_list
.length ())
3275 address_cost_data_list
.safe_grow_cleared (data_index
+ 1);
3277 data
= address_cost_data_list
[data_index
];
3281 HOST_WIDE_INT rat
, off
= 0;
3282 int old_cse_not_expected
, width
;
3283 unsigned sym_p
, var_p
, off_p
, rat_p
, add_c
;
3288 data
= (address_cost_data
) xcalloc (1, sizeof (*data
));
3290 reg1
= gen_raw_REG (address_mode
, LAST_VIRTUAL_REGISTER
+ 1);
3292 width
= GET_MODE_BITSIZE (address_mode
) - 1;
3293 if (width
> (HOST_BITS_PER_WIDE_INT
- 1))
3294 width
= HOST_BITS_PER_WIDE_INT
- 1;
3295 addr
= gen_rtx_fmt_ee (PLUS
, address_mode
, reg1
, NULL_RTX
);
3297 for (i
= width
; i
>= 0; i
--)
3299 off
= -((unsigned HOST_WIDE_INT
) 1 << i
);
3300 XEXP (addr
, 1) = gen_int_mode (off
, address_mode
);
3301 if (memory_address_addr_space_p (mem_mode
, addr
, as
))
3304 data
->min_offset
= (i
== -1? 0 : off
);
3306 for (i
= width
; i
>= 0; i
--)
3308 off
= ((unsigned HOST_WIDE_INT
) 1 << i
) - 1;
3309 XEXP (addr
, 1) = gen_int_mode (off
, address_mode
);
3310 if (memory_address_addr_space_p (mem_mode
, addr
, as
))
3312 /* For some TARGET, like ARM THUMB1, the offset should be nature
3313 aligned. Try an aligned offset if address_mode is not QImode. */
3314 off
= (address_mode
== QImode
)
3316 : ((unsigned HOST_WIDE_INT
) 1 << i
)
3317 - GET_MODE_SIZE (address_mode
);
3320 XEXP (addr
, 1) = gen_int_mode (off
, address_mode
);
3321 if (memory_address_addr_space_p (mem_mode
, addr
, as
))
3327 data
->max_offset
= off
;
3329 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3331 fprintf (dump_file
, "get_address_cost:\n");
3332 fprintf (dump_file
, " min offset %s " HOST_WIDE_INT_PRINT_DEC
"\n",
3333 GET_MODE_NAME (mem_mode
),
3335 fprintf (dump_file
, " max offset %s " HOST_WIDE_INT_PRINT_DEC
"\n",
3336 GET_MODE_NAME (mem_mode
),
3341 for (i
= 2; i
<= MAX_RATIO
; i
++)
3342 if (multiplier_allowed_in_address_p (i
, mem_mode
, as
))
3348 /* Compute the cost of various addressing modes. */
3350 reg0
= gen_raw_REG (address_mode
, LAST_VIRTUAL_REGISTER
+ 1);
3351 reg1
= gen_raw_REG (address_mode
, LAST_VIRTUAL_REGISTER
+ 2);
3353 if (USE_LOAD_PRE_DECREMENT (mem_mode
)
3354 || USE_STORE_PRE_DECREMENT (mem_mode
))
3356 addr
= gen_rtx_PRE_DEC (address_mode
, reg0
);
3357 has_predec
[mem_mode
]
3358 = memory_address_addr_space_p (mem_mode
, addr
, as
);
3360 if (has_predec
[mem_mode
])
3361 data
->ainc_costs
[AINC_PRE_DEC
]
3362 = address_cost (addr
, mem_mode
, as
, speed
);
3364 if (USE_LOAD_POST_DECREMENT (mem_mode
)
3365 || USE_STORE_POST_DECREMENT (mem_mode
))
3367 addr
= gen_rtx_POST_DEC (address_mode
, reg0
);
3368 has_postdec
[mem_mode
]
3369 = memory_address_addr_space_p (mem_mode
, addr
, as
);
3371 if (has_postdec
[mem_mode
])
3372 data
->ainc_costs
[AINC_POST_DEC
]
3373 = address_cost (addr
, mem_mode
, as
, speed
);
3375 if (USE_LOAD_PRE_INCREMENT (mem_mode
)
3376 || USE_STORE_PRE_DECREMENT (mem_mode
))
3378 addr
= gen_rtx_PRE_INC (address_mode
, reg0
);
3379 has_preinc
[mem_mode
]
3380 = memory_address_addr_space_p (mem_mode
, addr
, as
);
3382 if (has_preinc
[mem_mode
])
3383 data
->ainc_costs
[AINC_PRE_INC
]
3384 = address_cost (addr
, mem_mode
, as
, speed
);
3386 if (USE_LOAD_POST_INCREMENT (mem_mode
)
3387 || USE_STORE_POST_INCREMENT (mem_mode
))
3389 addr
= gen_rtx_POST_INC (address_mode
, reg0
);
3390 has_postinc
[mem_mode
]
3391 = memory_address_addr_space_p (mem_mode
, addr
, as
);
3393 if (has_postinc
[mem_mode
])
3394 data
->ainc_costs
[AINC_POST_INC
]
3395 = address_cost (addr
, mem_mode
, as
, speed
);
3397 for (i
= 0; i
< 16; i
++)
3400 var_p
= (i
>> 1) & 1;
3401 off_p
= (i
>> 2) & 1;
3402 rat_p
= (i
>> 3) & 1;
3406 addr
= gen_rtx_fmt_ee (MULT
, address_mode
, addr
,
3407 gen_int_mode (rat
, address_mode
));
3410 addr
= gen_rtx_fmt_ee (PLUS
, address_mode
, addr
, reg1
);
3414 base
= gen_rtx_SYMBOL_REF (address_mode
, ggc_strdup (""));
3415 /* ??? We can run into trouble with some backends by presenting
3416 it with symbols which haven't been properly passed through
3417 targetm.encode_section_info. By setting the local bit, we
3418 enhance the probability of things working. */
3419 SYMBOL_REF_FLAGS (base
) = SYMBOL_FLAG_LOCAL
;
3422 base
= gen_rtx_fmt_e (CONST
, address_mode
,
3424 (PLUS
, address_mode
, base
,
3425 gen_int_mode (off
, address_mode
)));
3428 base
= gen_int_mode (off
, address_mode
);
3433 addr
= gen_rtx_fmt_ee (PLUS
, address_mode
, addr
, base
);
3436 /* To avoid splitting addressing modes, pretend that no cse will
3438 old_cse_not_expected
= cse_not_expected
;
3439 cse_not_expected
= true;
3440 addr
= memory_address_addr_space (mem_mode
, addr
, as
);
3441 cse_not_expected
= old_cse_not_expected
;
3445 acost
= seq_cost (seq
, speed
);
3446 acost
+= address_cost (addr
, mem_mode
, as
, speed
);
3450 data
->costs
[sym_p
][var_p
][off_p
][rat_p
] = acost
;
3453 /* On some targets, it is quite expensive to load symbol to a register,
3454 which makes addresses that contain symbols look much more expensive.
3455 However, the symbol will have to be loaded in any case before the
3456 loop (and quite likely we have it in register already), so it does not
3457 make much sense to penalize them too heavily. So make some final
3458 tweaks for the SYMBOL_PRESENT modes:
3460 If VAR_PRESENT is false, and the mode obtained by changing symbol to
3461 var is cheaper, use this mode with small penalty.
3462 If VAR_PRESENT is true, try whether the mode with
3463 SYMBOL_PRESENT = false is cheaper even with cost of addition, and
3464 if this is the case, use it. */
3465 add_c
= add_cost (speed
, address_mode
);
3466 for (i
= 0; i
< 8; i
++)
3469 off_p
= (i
>> 1) & 1;
3470 rat_p
= (i
>> 2) & 1;
3472 acost
= data
->costs
[0][1][off_p
][rat_p
] + 1;
3476 if (acost
< data
->costs
[1][var_p
][off_p
][rat_p
])
3477 data
->costs
[1][var_p
][off_p
][rat_p
] = acost
;
3480 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3482 fprintf (dump_file
, "Address costs:\n");
3484 for (i
= 0; i
< 16; i
++)
3487 var_p
= (i
>> 1) & 1;
3488 off_p
= (i
>> 2) & 1;
3489 rat_p
= (i
>> 3) & 1;
3491 fprintf (dump_file
, " ");
3493 fprintf (dump_file
, "sym + ");
3495 fprintf (dump_file
, "var + ");
3497 fprintf (dump_file
, "cst + ");
3499 fprintf (dump_file
, "rat * ");
3501 acost
= data
->costs
[sym_p
][var_p
][off_p
][rat_p
];
3502 fprintf (dump_file
, "index costs %d\n", acost
);
3504 if (has_predec
[mem_mode
] || has_postdec
[mem_mode
]
3505 || has_preinc
[mem_mode
] || has_postinc
[mem_mode
])
3506 fprintf (dump_file
, " May include autoinc/dec\n");
3507 fprintf (dump_file
, "\n");
3510 address_cost_data_list
[data_index
] = data
;
3513 bits
= GET_MODE_BITSIZE (address_mode
);
3514 mask
= ~(~(unsigned HOST_WIDE_INT
) 0 << (bits
- 1) << 1);
3516 if ((offset
>> (bits
- 1) & 1))
3521 autoinc_type
= AINC_NONE
;
3522 msize
= GET_MODE_SIZE (mem_mode
);
3523 autoinc_offset
= offset
;
3525 autoinc_offset
+= ratio
* cstep
;
3526 if (symbol_present
|| var_present
|| ratio
!= 1)
3530 if (has_postinc
[mem_mode
] && autoinc_offset
== 0
3532 autoinc_type
= AINC_POST_INC
;
3533 else if (has_postdec
[mem_mode
] && autoinc_offset
== 0
3535 autoinc_type
= AINC_POST_DEC
;
3536 else if (has_preinc
[mem_mode
] && autoinc_offset
== msize
3538 autoinc_type
= AINC_PRE_INC
;
3539 else if (has_predec
[mem_mode
] && autoinc_offset
== -msize
3541 autoinc_type
= AINC_PRE_DEC
;
3543 if (autoinc_type
!= AINC_NONE
)
3548 offset_p
= (s_offset
!= 0
3549 && data
->min_offset
<= s_offset
3550 && s_offset
<= data
->max_offset
);
3551 ratio_p
= (ratio
!= 1
3552 && multiplier_allowed_in_address_p (ratio
, mem_mode
, as
));
3554 if (ratio
!= 1 && !ratio_p
)
3555 cost
+= mult_by_coeff_cost (ratio
, address_mode
, speed
);
3557 if (s_offset
&& !offset_p
&& !symbol_present
)
3558 cost
+= add_cost (speed
, address_mode
);
3561 *may_autoinc
= autoinc
;
3563 acost
= data
->ainc_costs
[autoinc_type
];
3565 acost
= data
->costs
[symbol_present
][var_present
][offset_p
][ratio_p
];
3566 complexity
= (symbol_present
!= 0) + (var_present
!= 0) + offset_p
+ ratio_p
;
3567 return new_cost (cost
+ acost
, complexity
);
3570 /* Calculate the SPEED or size cost of shiftadd EXPR in MODE. MULT is the
3571 the EXPR operand holding the shift. COST0 and COST1 are the costs for
3572 calculating the operands of EXPR. Returns true if successful, and returns
3573 the cost in COST. */
3576 get_shiftadd_cost (tree expr
, machine_mode mode
, comp_cost cost0
,
3577 comp_cost cost1
, tree mult
, bool speed
, comp_cost
*cost
)
3580 tree op1
= TREE_OPERAND (expr
, 1);
3581 tree cst
= TREE_OPERAND (mult
, 1);
3582 tree multop
= TREE_OPERAND (mult
, 0);
3583 int m
= exact_log2 (int_cst_value (cst
));
3584 int maxm
= MIN (BITS_PER_WORD
, GET_MODE_BITSIZE (mode
));
3586 bool equal_p
= false;
3588 if (!(m
>= 0 && m
< maxm
))
3591 if (operand_equal_p (op1
, mult
, 0))
3594 sa_cost
= (TREE_CODE (expr
) != MINUS_EXPR
3595 ? shiftadd_cost (speed
, mode
, m
)
3597 ? shiftsub1_cost (speed
, mode
, m
)
3598 : shiftsub0_cost (speed
, mode
, m
)));
3599 res
= new_cost (sa_cost
, 0);
3600 res
= add_costs (res
, equal_p
? cost0
: cost1
);
3602 STRIP_NOPS (multop
);
3603 if (!is_gimple_val (multop
))
3604 res
= add_costs (res
, force_expr_to_var_cost (multop
, speed
));
3610 /* Estimates cost of forcing expression EXPR into a variable. */
3613 force_expr_to_var_cost (tree expr
, bool speed
)
3615 static bool costs_initialized
= false;
3616 static unsigned integer_cost
[2];
3617 static unsigned symbol_cost
[2];
3618 static unsigned address_cost
[2];
3620 comp_cost cost0
, cost1
, cost
;
3623 if (!costs_initialized
)
3625 tree type
= build_pointer_type (integer_type_node
);
3630 var
= create_tmp_var_raw (integer_type_node
, "test_var");
3631 TREE_STATIC (var
) = 1;
3632 x
= produce_memory_decl_rtl (var
, NULL
);
3633 SET_DECL_RTL (var
, x
);
3635 addr
= build1 (ADDR_EXPR
, type
, var
);
3638 for (i
= 0; i
< 2; i
++)
3640 integer_cost
[i
] = computation_cost (build_int_cst (integer_type_node
,
3643 symbol_cost
[i
] = computation_cost (addr
, i
) + 1;
3646 = computation_cost (fold_build_pointer_plus_hwi (addr
, 2000), i
) + 1;
3647 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3649 fprintf (dump_file
, "force_expr_to_var_cost %s costs:\n", i
? "speed" : "size");
3650 fprintf (dump_file
, " integer %d\n", (int) integer_cost
[i
]);
3651 fprintf (dump_file
, " symbol %d\n", (int) symbol_cost
[i
]);
3652 fprintf (dump_file
, " address %d\n", (int) address_cost
[i
]);
3653 fprintf (dump_file
, " other %d\n", (int) target_spill_cost
[i
]);
3654 fprintf (dump_file
, "\n");
3658 costs_initialized
= true;
3663 if (SSA_VAR_P (expr
))
3666 if (is_gimple_min_invariant (expr
))
3668 if (TREE_CODE (expr
) == INTEGER_CST
)
3669 return new_cost (integer_cost
[speed
], 0);
3671 if (TREE_CODE (expr
) == ADDR_EXPR
)
3673 tree obj
= TREE_OPERAND (expr
, 0);
3675 if (TREE_CODE (obj
) == VAR_DECL
3676 || TREE_CODE (obj
) == PARM_DECL
3677 || TREE_CODE (obj
) == RESULT_DECL
)
3678 return new_cost (symbol_cost
[speed
], 0);
3681 return new_cost (address_cost
[speed
], 0);
3684 switch (TREE_CODE (expr
))
3686 case POINTER_PLUS_EXPR
:
3690 op0
= TREE_OPERAND (expr
, 0);
3691 op1
= TREE_OPERAND (expr
, 1);
3698 op0
= TREE_OPERAND (expr
, 0);
3704 /* Just an arbitrary value, FIXME. */
3705 return new_cost (target_spill_cost
[speed
], 0);
3708 if (op0
== NULL_TREE
3709 || TREE_CODE (op0
) == SSA_NAME
|| CONSTANT_CLASS_P (op0
))
3712 cost0
= force_expr_to_var_cost (op0
, speed
);
3714 if (op1
== NULL_TREE
3715 || TREE_CODE (op1
) == SSA_NAME
|| CONSTANT_CLASS_P (op1
))
3718 cost1
= force_expr_to_var_cost (op1
, speed
);
3720 mode
= TYPE_MODE (TREE_TYPE (expr
));
3721 switch (TREE_CODE (expr
))
3723 case POINTER_PLUS_EXPR
:
3727 cost
= new_cost (add_cost (speed
, mode
), 0);
3728 if (TREE_CODE (expr
) != NEGATE_EXPR
)
3730 tree mult
= NULL_TREE
;
3732 if (TREE_CODE (op1
) == MULT_EXPR
)
3734 else if (TREE_CODE (op0
) == MULT_EXPR
)
3737 if (mult
!= NULL_TREE
3738 && cst_and_fits_in_hwi (TREE_OPERAND (mult
, 1))
3739 && get_shiftadd_cost (expr
, mode
, cost0
, cost1
, mult
,
3747 tree inner_mode
, outer_mode
;
3748 outer_mode
= TREE_TYPE (expr
);
3749 inner_mode
= TREE_TYPE (op0
);
3750 cost
= new_cost (convert_cost (TYPE_MODE (outer_mode
),
3751 TYPE_MODE (inner_mode
), speed
), 0);
3756 if (cst_and_fits_in_hwi (op0
))
3757 cost
= new_cost (mult_by_coeff_cost (int_cst_value (op0
),
3759 else if (cst_and_fits_in_hwi (op1
))
3760 cost
= new_cost (mult_by_coeff_cost (int_cst_value (op1
),
3763 return new_cost (target_spill_cost
[speed
], 0);
3770 cost
= add_costs (cost
, cost0
);
3771 cost
= add_costs (cost
, cost1
);
3773 /* Bound the cost by target_spill_cost. The parts of complicated
3774 computations often are either loop invariant or at least can
3775 be shared between several iv uses, so letting this grow without
3776 limits would not give reasonable results. */
3777 if (cost
.cost
> (int) target_spill_cost
[speed
])
3778 cost
.cost
= target_spill_cost
[speed
];
3783 /* Estimates cost of forcing EXPR into a variable. DEPENDS_ON is a set of the
3784 invariants the computation depends on. */
3787 force_var_cost (struct ivopts_data
*data
,
3788 tree expr
, bitmap
*depends_on
)
3792 fd_ivopts_data
= data
;
3793 walk_tree (&expr
, find_depends
, depends_on
, NULL
);
3796 return force_expr_to_var_cost (expr
, data
->speed
);
3799 /* Estimates cost of expressing address ADDR as var + symbol + offset. The
3800 value of offset is added to OFFSET, SYMBOL_PRESENT and VAR_PRESENT are set
3801 to false if the corresponding part is missing. DEPENDS_ON is a set of the
3802 invariants the computation depends on. */
3805 split_address_cost (struct ivopts_data
*data
,
3806 tree addr
, bool *symbol_present
, bool *var_present
,
3807 unsigned HOST_WIDE_INT
*offset
, bitmap
*depends_on
)
3810 HOST_WIDE_INT bitsize
;
3811 HOST_WIDE_INT bitpos
;
3814 int unsignedp
, volatilep
;
3816 core
= get_inner_reference (addr
, &bitsize
, &bitpos
, &toffset
, &mode
,
3817 &unsignedp
, &volatilep
, false);
3820 || bitpos
% BITS_PER_UNIT
!= 0
3821 || TREE_CODE (core
) != VAR_DECL
)
3823 *symbol_present
= false;
3824 *var_present
= true;
3825 fd_ivopts_data
= data
;
3826 walk_tree (&addr
, find_depends
, depends_on
, NULL
);
3827 return new_cost (target_spill_cost
[data
->speed
], 0);
3830 *offset
+= bitpos
/ BITS_PER_UNIT
;
3831 if (TREE_STATIC (core
)
3832 || DECL_EXTERNAL (core
))
3834 *symbol_present
= true;
3835 *var_present
= false;
3839 *symbol_present
= false;
3840 *var_present
= true;
3844 /* Estimates cost of expressing difference of addresses E1 - E2 as
3845 var + symbol + offset. The value of offset is added to OFFSET,
3846 SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
3847 part is missing. DEPENDS_ON is a set of the invariants the computation
3851 ptr_difference_cost (struct ivopts_data
*data
,
3852 tree e1
, tree e2
, bool *symbol_present
, bool *var_present
,
3853 unsigned HOST_WIDE_INT
*offset
, bitmap
*depends_on
)
3855 HOST_WIDE_INT diff
= 0;
3856 aff_tree aff_e1
, aff_e2
;
3859 gcc_assert (TREE_CODE (e1
) == ADDR_EXPR
);
3861 if (ptr_difference_const (e1
, e2
, &diff
))
3864 *symbol_present
= false;
3865 *var_present
= false;
3869 if (integer_zerop (e2
))
3870 return split_address_cost (data
, TREE_OPERAND (e1
, 0),
3871 symbol_present
, var_present
, offset
, depends_on
);
3873 *symbol_present
= false;
3874 *var_present
= true;
3876 type
= signed_type_for (TREE_TYPE (e1
));
3877 tree_to_aff_combination (e1
, type
, &aff_e1
);
3878 tree_to_aff_combination (e2
, type
, &aff_e2
);
3879 aff_combination_scale (&aff_e2
, -1);
3880 aff_combination_add (&aff_e1
, &aff_e2
);
3882 return force_var_cost (data
, aff_combination_to_tree (&aff_e1
), depends_on
);
3885 /* Estimates cost of expressing difference E1 - E2 as
3886 var + symbol + offset. The value of offset is added to OFFSET,
3887 SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
3888 part is missing. DEPENDS_ON is a set of the invariants the computation
3892 difference_cost (struct ivopts_data
*data
,
3893 tree e1
, tree e2
, bool *symbol_present
, bool *var_present
,
3894 unsigned HOST_WIDE_INT
*offset
, bitmap
*depends_on
)
3896 machine_mode mode
= TYPE_MODE (TREE_TYPE (e1
));
3897 unsigned HOST_WIDE_INT off1
, off2
;
3898 aff_tree aff_e1
, aff_e2
;
3901 e1
= strip_offset (e1
, &off1
);
3902 e2
= strip_offset (e2
, &off2
);
3903 *offset
+= off1
- off2
;
3908 if (TREE_CODE (e1
) == ADDR_EXPR
)
3909 return ptr_difference_cost (data
, e1
, e2
, symbol_present
, var_present
,
3910 offset
, depends_on
);
3911 *symbol_present
= false;
3913 if (operand_equal_p (e1
, e2
, 0))
3915 *var_present
= false;
3919 *var_present
= true;
3921 if (integer_zerop (e2
))
3922 return force_var_cost (data
, e1
, depends_on
);
3924 if (integer_zerop (e1
))
3926 comp_cost cost
= force_var_cost (data
, e2
, depends_on
);
3927 cost
.cost
+= mult_by_coeff_cost (-1, mode
, data
->speed
);
3931 type
= signed_type_for (TREE_TYPE (e1
));
3932 tree_to_aff_combination (e1
, type
, &aff_e1
);
3933 tree_to_aff_combination (e2
, type
, &aff_e2
);
3934 aff_combination_scale (&aff_e2
, -1);
3935 aff_combination_add (&aff_e1
, &aff_e2
);
3937 return force_var_cost (data
, aff_combination_to_tree (&aff_e1
), depends_on
);
3940 /* Returns true if AFF1 and AFF2 are identical. */
3943 compare_aff_trees (aff_tree
*aff1
, aff_tree
*aff2
)
3947 if (aff1
->n
!= aff2
->n
)
3950 for (i
= 0; i
< aff1
->n
; i
++)
3952 if (aff1
->elts
[i
].coef
!= aff2
->elts
[i
].coef
)
3955 if (!operand_equal_p (aff1
->elts
[i
].val
, aff2
->elts
[i
].val
, 0))
3961 /* Stores EXPR in DATA->inv_expr_tab, and assigns it an inv_expr_id. */
3964 get_expr_id (struct ivopts_data
*data
, tree expr
)
3966 struct iv_inv_expr_ent ent
;
3967 struct iv_inv_expr_ent
**slot
;
3970 ent
.hash
= iterative_hash_expr (expr
, 0);
3971 slot
= data
->inv_expr_tab
->find_slot (&ent
, INSERT
);
3975 *slot
= XNEW (struct iv_inv_expr_ent
);
3976 (*slot
)->expr
= expr
;
3977 (*slot
)->hash
= ent
.hash
;
3978 (*slot
)->id
= data
->inv_expr_id
++;
3982 /* Returns the pseudo expr id if expression UBASE - RATIO * CBASE
3983 requires a new compiler generated temporary. Returns -1 otherwise.
3984 ADDRESS_P is a flag indicating if the expression is for address
3988 get_loop_invariant_expr_id (struct ivopts_data
*data
, tree ubase
,
3989 tree cbase
, HOST_WIDE_INT ratio
,
3992 aff_tree ubase_aff
, cbase_aff
;
4000 if ((TREE_CODE (ubase
) == INTEGER_CST
)
4001 && (TREE_CODE (cbase
) == INTEGER_CST
))
4004 /* Strips the constant part. */
4005 if (TREE_CODE (ubase
) == PLUS_EXPR
4006 || TREE_CODE (ubase
) == MINUS_EXPR
4007 || TREE_CODE (ubase
) == POINTER_PLUS_EXPR
)
4009 if (TREE_CODE (TREE_OPERAND (ubase
, 1)) == INTEGER_CST
)
4010 ubase
= TREE_OPERAND (ubase
, 0);
4013 /* Strips the constant part. */
4014 if (TREE_CODE (cbase
) == PLUS_EXPR
4015 || TREE_CODE (cbase
) == MINUS_EXPR
4016 || TREE_CODE (cbase
) == POINTER_PLUS_EXPR
)
4018 if (TREE_CODE (TREE_OPERAND (cbase
, 1)) == INTEGER_CST
)
4019 cbase
= TREE_OPERAND (cbase
, 0);
4024 if (((TREE_CODE (ubase
) == SSA_NAME
)
4025 || (TREE_CODE (ubase
) == ADDR_EXPR
4026 && is_gimple_min_invariant (ubase
)))
4027 && (TREE_CODE (cbase
) == INTEGER_CST
))
4030 if (((TREE_CODE (cbase
) == SSA_NAME
)
4031 || (TREE_CODE (cbase
) == ADDR_EXPR
4032 && is_gimple_min_invariant (cbase
)))
4033 && (TREE_CODE (ubase
) == INTEGER_CST
))
4039 if (operand_equal_p (ubase
, cbase
, 0))
4042 if (TREE_CODE (ubase
) == ADDR_EXPR
4043 && TREE_CODE (cbase
) == ADDR_EXPR
)
4047 usym
= TREE_OPERAND (ubase
, 0);
4048 csym
= TREE_OPERAND (cbase
, 0);
4049 if (TREE_CODE (usym
) == ARRAY_REF
)
4051 tree ind
= TREE_OPERAND (usym
, 1);
4052 if (TREE_CODE (ind
) == INTEGER_CST
4053 && tree_fits_shwi_p (ind
)
4054 && tree_to_shwi (ind
) == 0)
4055 usym
= TREE_OPERAND (usym
, 0);
4057 if (TREE_CODE (csym
) == ARRAY_REF
)
4059 tree ind
= TREE_OPERAND (csym
, 1);
4060 if (TREE_CODE (ind
) == INTEGER_CST
4061 && tree_fits_shwi_p (ind
)
4062 && tree_to_shwi (ind
) == 0)
4063 csym
= TREE_OPERAND (csym
, 0);
4065 if (operand_equal_p (usym
, csym
, 0))
4068 /* Now do more complex comparison */
4069 tree_to_aff_combination (ubase
, TREE_TYPE (ubase
), &ubase_aff
);
4070 tree_to_aff_combination (cbase
, TREE_TYPE (cbase
), &cbase_aff
);
4071 if (compare_aff_trees (&ubase_aff
, &cbase_aff
))
4075 tree_to_aff_combination (ub
, TREE_TYPE (ub
), &ubase_aff
);
4076 tree_to_aff_combination (cb
, TREE_TYPE (cb
), &cbase_aff
);
4078 aff_combination_scale (&cbase_aff
, -1 * ratio
);
4079 aff_combination_add (&ubase_aff
, &cbase_aff
);
4080 expr
= aff_combination_to_tree (&ubase_aff
);
4081 return get_expr_id (data
, expr
);
4086 /* Determines the cost of the computation by that USE is expressed
4087 from induction variable CAND. If ADDRESS_P is true, we just need
4088 to create an address from it, otherwise we want to get it into
4089 register. A set of invariants we depend on is stored in
4090 DEPENDS_ON. AT is the statement at that the value is computed.
4091 If CAN_AUTOINC is nonnull, use it to record whether autoinc
4092 addressing is likely. */
4095 get_computation_cost_at (struct ivopts_data
*data
,
4096 struct iv_use
*use
, struct iv_cand
*cand
,
4097 bool address_p
, bitmap
*depends_on
, gimple at
,
4101 tree ubase
= use
->iv
->base
, ustep
= use
->iv
->step
;
4103 tree utype
= TREE_TYPE (ubase
), ctype
;
4104 unsigned HOST_WIDE_INT cstepi
, offset
= 0;
4105 HOST_WIDE_INT ratio
, aratio
;
4106 bool var_present
, symbol_present
, stmt_is_after_inc
;
4109 bool speed
= optimize_bb_for_speed_p (gimple_bb (at
));
4110 machine_mode mem_mode
= (address_p
4111 ? TYPE_MODE (TREE_TYPE (*use
->op_p
))
4116 /* Only consider real candidates. */
4118 return infinite_cost
;
4120 cbase
= cand
->iv
->base
;
4121 cstep
= cand
->iv
->step
;
4122 ctype
= TREE_TYPE (cbase
);
4124 if (TYPE_PRECISION (utype
) > TYPE_PRECISION (ctype
))
4126 /* We do not have a precision to express the values of use. */
4127 return infinite_cost
;
4131 || (use
->iv
->base_object
4132 && cand
->iv
->base_object
4133 && POINTER_TYPE_P (TREE_TYPE (use
->iv
->base_object
))
4134 && POINTER_TYPE_P (TREE_TYPE (cand
->iv
->base_object
))))
4136 /* Do not try to express address of an object with computation based
4137 on address of a different object. This may cause problems in rtl
4138 level alias analysis (that does not expect this to be happening,
4139 as this is illegal in C), and would be unlikely to be useful
4141 if (use
->iv
->base_object
4142 && cand
->iv
->base_object
4143 && !operand_equal_p (use
->iv
->base_object
, cand
->iv
->base_object
, 0))
4144 return infinite_cost
;
4147 if (TYPE_PRECISION (utype
) < TYPE_PRECISION (ctype
))
4149 /* TODO -- add direct handling of this case. */
4153 /* CSTEPI is removed from the offset in case statement is after the
4154 increment. If the step is not constant, we use zero instead.
4155 This is a bit imprecise (there is the extra addition), but
4156 redundancy elimination is likely to transform the code so that
4157 it uses value of the variable before increment anyway,
4158 so it is not that much unrealistic. */
4159 if (cst_and_fits_in_hwi (cstep
))
4160 cstepi
= int_cst_value (cstep
);
4164 if (!constant_multiple_of (ustep
, cstep
, &rat
))
4165 return infinite_cost
;
4167 if (wi::fits_shwi_p (rat
))
4168 ratio
= rat
.to_shwi ();
4170 return infinite_cost
;
4173 ctype
= TREE_TYPE (cbase
);
4175 stmt_is_after_inc
= stmt_after_increment (data
->current_loop
, cand
, at
);
4177 /* use = ubase + ratio * (var - cbase). If either cbase is a constant
4178 or ratio == 1, it is better to handle this like
4180 ubase - ratio * cbase + ratio * var
4182 (also holds in the case ratio == -1, TODO. */
4184 if (cst_and_fits_in_hwi (cbase
))
4186 offset
= - ratio
* (unsigned HOST_WIDE_INT
) int_cst_value (cbase
);
4187 cost
= difference_cost (data
,
4188 ubase
, build_int_cst (utype
, 0),
4189 &symbol_present
, &var_present
, &offset
,
4191 cost
.cost
/= avg_loop_niter (data
->current_loop
);
4193 else if (ratio
== 1)
4195 tree real_cbase
= cbase
;
4197 /* Check to see if any adjustment is needed. */
4198 if (cstepi
== 0 && stmt_is_after_inc
)
4200 aff_tree real_cbase_aff
;
4203 tree_to_aff_combination (cbase
, TREE_TYPE (real_cbase
),
4205 tree_to_aff_combination (cstep
, TREE_TYPE (cstep
), &cstep_aff
);
4207 aff_combination_add (&real_cbase_aff
, &cstep_aff
);
4208 real_cbase
= aff_combination_to_tree (&real_cbase_aff
);
4211 cost
= difference_cost (data
,
4213 &symbol_present
, &var_present
, &offset
,
4215 cost
.cost
/= avg_loop_niter (data
->current_loop
);
4218 && !POINTER_TYPE_P (ctype
)
4219 && multiplier_allowed_in_address_p
4221 TYPE_ADDR_SPACE (TREE_TYPE (utype
))))
4224 = fold_build2 (MULT_EXPR
, ctype
, cbase
, build_int_cst (ctype
, ratio
));
4225 cost
= difference_cost (data
,
4227 &symbol_present
, &var_present
, &offset
,
4229 cost
.cost
/= avg_loop_niter (data
->current_loop
);
4233 cost
= force_var_cost (data
, cbase
, depends_on
);
4234 cost
= add_costs (cost
,
4235 difference_cost (data
,
4236 ubase
, build_int_cst (utype
, 0),
4237 &symbol_present
, &var_present
,
4238 &offset
, depends_on
));
4239 cost
.cost
/= avg_loop_niter (data
->current_loop
);
4240 cost
.cost
+= add_cost (data
->speed
, TYPE_MODE (ctype
));
4246 get_loop_invariant_expr_id (data
, ubase
, cbase
, ratio
, address_p
);
4247 /* Clear depends on. */
4248 if (*inv_expr_id
!= -1 && depends_on
&& *depends_on
)
4249 bitmap_clear (*depends_on
);
4252 /* If we are after the increment, the value of the candidate is higher by
4254 if (stmt_is_after_inc
)
4255 offset
-= ratio
* cstepi
;
4257 /* Now the computation is in shape symbol + var1 + const + ratio * var2.
4258 (symbol/var1/const parts may be omitted). If we are looking for an
4259 address, find the cost of addressing this. */
4261 return add_costs (cost
,
4262 get_address_cost (symbol_present
, var_present
,
4263 offset
, ratio
, cstepi
,
4265 TYPE_ADDR_SPACE (TREE_TYPE (utype
)),
4266 speed
, stmt_is_after_inc
,
4269 /* Otherwise estimate the costs for computing the expression. */
4270 if (!symbol_present
&& !var_present
&& !offset
)
4273 cost
.cost
+= mult_by_coeff_cost (ratio
, TYPE_MODE (ctype
), speed
);
4277 /* Symbol + offset should be compile-time computable so consider that they
4278 are added once to the variable, if present. */
4279 if (var_present
&& (symbol_present
|| offset
))
4280 cost
.cost
+= adjust_setup_cost (data
,
4281 add_cost (speed
, TYPE_MODE (ctype
)));
4283 /* Having offset does not affect runtime cost in case it is added to
4284 symbol, but it increases complexity. */
4288 cost
.cost
+= add_cost (speed
, TYPE_MODE (ctype
));
4290 aratio
= ratio
> 0 ? ratio
: -ratio
;
4292 cost
.cost
+= mult_by_coeff_cost (aratio
, TYPE_MODE (ctype
), speed
);
4297 *can_autoinc
= false;
4300 /* Just get the expression, expand it and measure the cost. */
4301 tree comp
= get_computation_at (data
->current_loop
, use
, cand
, at
);
4304 return infinite_cost
;
4307 comp
= build_simple_mem_ref (comp
);
4309 return new_cost (computation_cost (comp
, speed
), 0);
4313 /* Determines the cost of the computation by that USE is expressed
4314 from induction variable CAND. If ADDRESS_P is true, we just need
4315 to create an address from it, otherwise we want to get it into
4316 register. A set of invariants we depend on is stored in
4317 DEPENDS_ON. If CAN_AUTOINC is nonnull, use it to record whether
4318 autoinc addressing is likely. */
4321 get_computation_cost (struct ivopts_data
*data
,
4322 struct iv_use
*use
, struct iv_cand
*cand
,
4323 bool address_p
, bitmap
*depends_on
,
4324 bool *can_autoinc
, int *inv_expr_id
)
4326 return get_computation_cost_at (data
,
4327 use
, cand
, address_p
, depends_on
, use
->stmt
,
4328 can_autoinc
, inv_expr_id
);
4331 /* Determines cost of basing replacement of USE on CAND in a generic
4335 determine_use_iv_cost_generic (struct ivopts_data
*data
,
4336 struct iv_use
*use
, struct iv_cand
*cand
)
4340 int inv_expr_id
= -1;
4342 /* The simple case first -- if we need to express value of the preserved
4343 original biv, the cost is 0. This also prevents us from counting the
4344 cost of increment twice -- once at this use and once in the cost of
4346 if (cand
->pos
== IP_ORIGINAL
4347 && cand
->incremented_at
== use
->stmt
)
4349 set_use_iv_cost (data
, use
, cand
, no_cost
, NULL
, NULL_TREE
,
4354 cost
= get_computation_cost (data
, use
, cand
, false, &depends_on
,
4355 NULL
, &inv_expr_id
);
4357 set_use_iv_cost (data
, use
, cand
, cost
, depends_on
, NULL_TREE
, ERROR_MARK
,
4360 return !infinite_cost_p (cost
);
4363 /* Determines cost of basing replacement of USE on CAND in an address. */
4366 determine_use_iv_cost_address (struct ivopts_data
*data
,
4367 struct iv_use
*use
, struct iv_cand
*cand
)
4371 int inv_expr_id
= -1;
4372 comp_cost cost
= get_computation_cost (data
, use
, cand
, true, &depends_on
,
4373 &can_autoinc
, &inv_expr_id
);
4375 if (cand
->ainc_use
== use
)
4378 cost
.cost
-= cand
->cost_step
;
4379 /* If we generated the candidate solely for exploiting autoincrement
4380 opportunities, and it turns out it can't be used, set the cost to
4381 infinity to make sure we ignore it. */
4382 else if (cand
->pos
== IP_AFTER_USE
|| cand
->pos
== IP_BEFORE_USE
)
4383 cost
= infinite_cost
;
4385 set_use_iv_cost (data
, use
, cand
, cost
, depends_on
, NULL_TREE
, ERROR_MARK
,
4388 return !infinite_cost_p (cost
);
4391 /* Computes value of candidate CAND at position AT in iteration NITER, and
4392 stores it to VAL. */
4395 cand_value_at (struct loop
*loop
, struct iv_cand
*cand
, gimple at
, tree niter
,
4398 aff_tree step
, delta
, nit
;
4399 struct iv
*iv
= cand
->iv
;
4400 tree type
= TREE_TYPE (iv
->base
);
4401 tree steptype
= type
;
4402 if (POINTER_TYPE_P (type
))
4403 steptype
= sizetype
;
4404 steptype
= unsigned_type_for (type
);
4406 tree_to_aff_combination (iv
->step
, TREE_TYPE (iv
->step
), &step
);
4407 aff_combination_convert (&step
, steptype
);
4408 tree_to_aff_combination (niter
, TREE_TYPE (niter
), &nit
);
4409 aff_combination_convert (&nit
, steptype
);
4410 aff_combination_mult (&nit
, &step
, &delta
);
4411 if (stmt_after_increment (loop
, cand
, at
))
4412 aff_combination_add (&delta
, &step
);
4414 tree_to_aff_combination (iv
->base
, type
, val
);
4415 if (!POINTER_TYPE_P (type
))
4416 aff_combination_convert (val
, steptype
);
4417 aff_combination_add (val
, &delta
);
4420 /* Returns period of induction variable iv. */
4423 iv_period (struct iv
*iv
)
4425 tree step
= iv
->step
, period
, type
;
4428 gcc_assert (step
&& TREE_CODE (step
) == INTEGER_CST
);
4430 type
= unsigned_type_for (TREE_TYPE (step
));
4431 /* Period of the iv is lcm (step, type_range)/step -1,
4432 i.e., N*type_range/step - 1. Since type range is power
4433 of two, N == (step >> num_of_ending_zeros_binary (step),
4434 so the final result is
4436 (type_range >> num_of_ending_zeros_binary (step)) - 1
4439 pow2div
= num_ending_zeros (step
);
4441 period
= build_low_bits_mask (type
,
4442 (TYPE_PRECISION (type
)
4443 - tree_to_uhwi (pow2div
)));
4448 /* Returns the comparison operator used when eliminating the iv USE. */
4450 static enum tree_code
4451 iv_elimination_compare (struct ivopts_data
*data
, struct iv_use
*use
)
4453 struct loop
*loop
= data
->current_loop
;
4457 ex_bb
= gimple_bb (use
->stmt
);
4458 exit
= EDGE_SUCC (ex_bb
, 0);
4459 if (flow_bb_inside_loop_p (loop
, exit
->dest
))
4460 exit
= EDGE_SUCC (ex_bb
, 1);
4462 return (exit
->flags
& EDGE_TRUE_VALUE
? EQ_EXPR
: NE_EXPR
);
4465 /* Returns true if we can prove that BASE - OFFSET does not overflow. For now,
4466 we only detect the situation that BASE = SOMETHING + OFFSET, where the
4467 calculation is performed in non-wrapping type.
4469 TODO: More generally, we could test for the situation that
4470 BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
4471 This would require knowing the sign of OFFSET. */
4474 difference_cannot_overflow_p (struct ivopts_data
*data
, tree base
, tree offset
)
4476 enum tree_code code
;
4478 aff_tree aff_e1
, aff_e2
, aff_offset
;
4480 if (!nowrap_type_p (TREE_TYPE (base
)))
4483 base
= expand_simple_operations (base
);
4485 if (TREE_CODE (base
) == SSA_NAME
)
4487 gimple stmt
= SSA_NAME_DEF_STMT (base
);
4489 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
4492 code
= gimple_assign_rhs_code (stmt
);
4493 if (get_gimple_rhs_class (code
) != GIMPLE_BINARY_RHS
)
4496 e1
= gimple_assign_rhs1 (stmt
);
4497 e2
= gimple_assign_rhs2 (stmt
);
4501 code
= TREE_CODE (base
);
4502 if (get_gimple_rhs_class (code
) != GIMPLE_BINARY_RHS
)
4504 e1
= TREE_OPERAND (base
, 0);
4505 e2
= TREE_OPERAND (base
, 1);
4508 /* Use affine expansion as deeper inspection to prove the equality. */
4509 tree_to_aff_combination_expand (e2
, TREE_TYPE (e2
),
4510 &aff_e2
, &data
->name_expansion_cache
);
4511 tree_to_aff_combination_expand (offset
, TREE_TYPE (offset
),
4512 &aff_offset
, &data
->name_expansion_cache
);
4513 aff_combination_scale (&aff_offset
, -1);
4517 aff_combination_add (&aff_e2
, &aff_offset
);
4518 if (aff_combination_zero_p (&aff_e2
))
4521 tree_to_aff_combination_expand (e1
, TREE_TYPE (e1
),
4522 &aff_e1
, &data
->name_expansion_cache
);
4523 aff_combination_add (&aff_e1
, &aff_offset
);
4524 return aff_combination_zero_p (&aff_e1
);
4526 case POINTER_PLUS_EXPR
:
4527 aff_combination_add (&aff_e2
, &aff_offset
);
4528 return aff_combination_zero_p (&aff_e2
);
4535 /* Tries to replace loop exit by one formulated in terms of a LT_EXPR
4536 comparison with CAND. NITER describes the number of iterations of
4537 the loops. If successful, the comparison in COMP_P is altered accordingly.
4539 We aim to handle the following situation:
4555 Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
4556 We aim to optimize this to
4564 while (p < p_0 - a + b);
4566 This preserves the correctness, since the pointer arithmetics does not
4567 overflow. More precisely:
4569 1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there is no
4570 overflow in computing it or the values of p.
4571 2) if a + 1 > b, then we need to verify that the expression p_0 - a does not
4572 overflow. To prove this, we use the fact that p_0 = base + a. */
4575 iv_elimination_compare_lt (struct ivopts_data
*data
,
4576 struct iv_cand
*cand
, enum tree_code
*comp_p
,
4577 struct tree_niter_desc
*niter
)
4579 tree cand_type
, a
, b
, mbz
, nit_type
= TREE_TYPE (niter
->niter
), offset
;
4580 struct aff_tree nit
, tmpa
, tmpb
;
4581 enum tree_code comp
;
4584 /* We need to know that the candidate induction variable does not overflow.
4585 While more complex analysis may be used to prove this, for now just
4586 check that the variable appears in the original program and that it
4587 is computed in a type that guarantees no overflows. */
4588 cand_type
= TREE_TYPE (cand
->iv
->base
);
4589 if (cand
->pos
!= IP_ORIGINAL
|| !nowrap_type_p (cand_type
))
4592 /* Make sure that the loop iterates till the loop bound is hit, as otherwise
4593 the calculation of the BOUND could overflow, making the comparison
4595 if (!data
->loop_single_exit_p
)
4598 /* We need to be able to decide whether candidate is increasing or decreasing
4599 in order to choose the right comparison operator. */
4600 if (!cst_and_fits_in_hwi (cand
->iv
->step
))
4602 step
= int_cst_value (cand
->iv
->step
);
4604 /* Check that the number of iterations matches the expected pattern:
4605 a + 1 > b ? 0 : b - a - 1. */
4606 mbz
= niter
->may_be_zero
;
4607 if (TREE_CODE (mbz
) == GT_EXPR
)
4609 /* Handle a + 1 > b. */
4610 tree op0
= TREE_OPERAND (mbz
, 0);
4611 if (TREE_CODE (op0
) == PLUS_EXPR
&& integer_onep (TREE_OPERAND (op0
, 1)))
4613 a
= TREE_OPERAND (op0
, 0);
4614 b
= TREE_OPERAND (mbz
, 1);
4619 else if (TREE_CODE (mbz
) == LT_EXPR
)
4621 tree op1
= TREE_OPERAND (mbz
, 1);
4623 /* Handle b < a + 1. */
4624 if (TREE_CODE (op1
) == PLUS_EXPR
&& integer_onep (TREE_OPERAND (op1
, 1)))
4626 a
= TREE_OPERAND (op1
, 0);
4627 b
= TREE_OPERAND (mbz
, 0);
4635 /* Expected number of iterations is B - A - 1. Check that it matches
4636 the actual number, i.e., that B - A - NITER = 1. */
4637 tree_to_aff_combination (niter
->niter
, nit_type
, &nit
);
4638 tree_to_aff_combination (fold_convert (nit_type
, a
), nit_type
, &tmpa
);
4639 tree_to_aff_combination (fold_convert (nit_type
, b
), nit_type
, &tmpb
);
4640 aff_combination_scale (&nit
, -1);
4641 aff_combination_scale (&tmpa
, -1);
4642 aff_combination_add (&tmpb
, &tmpa
);
4643 aff_combination_add (&tmpb
, &nit
);
4644 if (tmpb
.n
!= 0 || tmpb
.offset
!= 1)
4647 /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
4649 offset
= fold_build2 (MULT_EXPR
, TREE_TYPE (cand
->iv
->step
),
4651 fold_convert (TREE_TYPE (cand
->iv
->step
), a
));
4652 if (!difference_cannot_overflow_p (data
, cand
->iv
->base
, offset
))
4655 /* Determine the new comparison operator. */
4656 comp
= step
< 0 ? GT_EXPR
: LT_EXPR
;
4657 if (*comp_p
== NE_EXPR
)
4659 else if (*comp_p
== EQ_EXPR
)
4660 *comp_p
= invert_tree_comparison (comp
, false);
4667 /* Check whether it is possible to express the condition in USE by comparison
4668 of candidate CAND. If so, store the value compared with to BOUND, and the
4669 comparison operator to COMP. */
4672 may_eliminate_iv (struct ivopts_data
*data
,
4673 struct iv_use
*use
, struct iv_cand
*cand
, tree
*bound
,
4674 enum tree_code
*comp
)
4679 struct loop
*loop
= data
->current_loop
;
4681 struct tree_niter_desc
*desc
= NULL
;
4683 if (TREE_CODE (cand
->iv
->step
) != INTEGER_CST
)
4686 /* For now works only for exits that dominate the loop latch.
4687 TODO: extend to other conditions inside loop body. */
4688 ex_bb
= gimple_bb (use
->stmt
);
4689 if (use
->stmt
!= last_stmt (ex_bb
)
4690 || gimple_code (use
->stmt
) != GIMPLE_COND
4691 || !dominated_by_p (CDI_DOMINATORS
, loop
->latch
, ex_bb
))
4694 exit
= EDGE_SUCC (ex_bb
, 0);
4695 if (flow_bb_inside_loop_p (loop
, exit
->dest
))
4696 exit
= EDGE_SUCC (ex_bb
, 1);
4697 if (flow_bb_inside_loop_p (loop
, exit
->dest
))
4700 desc
= niter_for_exit (data
, exit
);
4704 /* Determine whether we can use the variable to test the exit condition.
4705 This is the case iff the period of the induction variable is greater
4706 than the number of iterations for which the exit condition is true. */
4707 period
= iv_period (cand
->iv
);
4709 /* If the number of iterations is constant, compare against it directly. */
4710 if (TREE_CODE (desc
->niter
) == INTEGER_CST
)
4712 /* See cand_value_at. */
4713 if (stmt_after_increment (loop
, cand
, use
->stmt
))
4715 if (!tree_int_cst_lt (desc
->niter
, period
))
4720 if (tree_int_cst_lt (period
, desc
->niter
))
4725 /* If not, and if this is the only possible exit of the loop, see whether
4726 we can get a conservative estimate on the number of iterations of the
4727 entire loop and compare against that instead. */
4730 widest_int period_value
, max_niter
;
4732 max_niter
= desc
->max
;
4733 if (stmt_after_increment (loop
, cand
, use
->stmt
))
4735 period_value
= wi::to_widest (period
);
4736 if (wi::gtu_p (max_niter
, period_value
))
4738 /* See if we can take advantage of inferred loop bound information. */
4739 if (data
->loop_single_exit_p
)
4741 if (!max_loop_iterations (loop
, &max_niter
))
4743 /* The loop bound is already adjusted by adding 1. */
4744 if (wi::gtu_p (max_niter
, period_value
))
4752 cand_value_at (loop
, cand
, use
->stmt
, desc
->niter
, &bnd
);
4754 *bound
= fold_convert (TREE_TYPE (cand
->iv
->base
),
4755 aff_combination_to_tree (&bnd
));
4756 *comp
= iv_elimination_compare (data
, use
);
4758 /* It is unlikely that computing the number of iterations using division
4759 would be more profitable than keeping the original induction variable. */
4760 if (expression_expensive_p (*bound
))
4763 /* Sometimes, it is possible to handle the situation that the number of
4764 iterations may be zero unless additional assumtions by using <
4765 instead of != in the exit condition.
4767 TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
4768 base the exit condition on it. However, that is often too
4770 if (!integer_zerop (desc
->may_be_zero
))
4771 return iv_elimination_compare_lt (data
, cand
, comp
, desc
);
4776 /* Calculates the cost of BOUND, if it is a PARM_DECL. A PARM_DECL must
4777 be copied, if is is used in the loop body and DATA->body_includes_call. */
4780 parm_decl_cost (struct ivopts_data
*data
, tree bound
)
4782 tree sbound
= bound
;
4783 STRIP_NOPS (sbound
);
4785 if (TREE_CODE (sbound
) == SSA_NAME
4786 && SSA_NAME_IS_DEFAULT_DEF (sbound
)
4787 && TREE_CODE (SSA_NAME_VAR (sbound
)) == PARM_DECL
4788 && data
->body_includes_call
)
4789 return COSTS_N_INSNS (1);
4794 /* Determines cost of basing replacement of USE on CAND in a condition. */
4797 determine_use_iv_cost_condition (struct ivopts_data
*data
,
4798 struct iv_use
*use
, struct iv_cand
*cand
)
4800 tree bound
= NULL_TREE
;
4802 bitmap depends_on_elim
= NULL
, depends_on_express
= NULL
, depends_on
;
4803 comp_cost elim_cost
, express_cost
, cost
, bound_cost
;
4805 int elim_inv_expr_id
= -1, express_inv_expr_id
= -1, inv_expr_id
;
4806 tree
*control_var
, *bound_cst
;
4807 enum tree_code comp
= ERROR_MARK
;
4809 /* Only consider real candidates. */
4812 set_use_iv_cost (data
, use
, cand
, infinite_cost
, NULL
, NULL_TREE
,
4817 /* Try iv elimination. */
4818 if (may_eliminate_iv (data
, use
, cand
, &bound
, &comp
))
4820 elim_cost
= force_var_cost (data
, bound
, &depends_on_elim
);
4821 if (elim_cost
.cost
== 0)
4822 elim_cost
.cost
= parm_decl_cost (data
, bound
);
4823 else if (TREE_CODE (bound
) == INTEGER_CST
)
4825 /* If we replace a loop condition 'i < n' with 'p < base + n',
4826 depends_on_elim will have 'base' and 'n' set, which implies
4827 that both 'base' and 'n' will be live during the loop. More likely,
4828 'base + n' will be loop invariant, resulting in only one live value
4829 during the loop. So in that case we clear depends_on_elim and set
4830 elim_inv_expr_id instead. */
4831 if (depends_on_elim
&& bitmap_count_bits (depends_on_elim
) > 1)
4833 elim_inv_expr_id
= get_expr_id (data
, bound
);
4834 bitmap_clear (depends_on_elim
);
4836 /* The bound is a loop invariant, so it will be only computed
4838 elim_cost
.cost
= adjust_setup_cost (data
, elim_cost
.cost
);
4841 elim_cost
= infinite_cost
;
4843 /* Try expressing the original giv. If it is compared with an invariant,
4844 note that we cannot get rid of it. */
4845 ok
= extract_cond_operands (data
, use
->stmt
, &control_var
, &bound_cst
,
4849 /* When the condition is a comparison of the candidate IV against
4850 zero, prefer this IV.
4852 TODO: The constant that we're subtracting from the cost should
4853 be target-dependent. This information should be added to the
4854 target costs for each backend. */
4855 if (!infinite_cost_p (elim_cost
) /* Do not try to decrease infinite! */
4856 && integer_zerop (*bound_cst
)
4857 && (operand_equal_p (*control_var
, cand
->var_after
, 0)
4858 || operand_equal_p (*control_var
, cand
->var_before
, 0)))
4859 elim_cost
.cost
-= 1;
4861 express_cost
= get_computation_cost (data
, use
, cand
, false,
4862 &depends_on_express
, NULL
,
4863 &express_inv_expr_id
);
4864 fd_ivopts_data
= data
;
4865 walk_tree (&cmp_iv
->base
, find_depends
, &depends_on_express
, NULL
);
4867 /* Count the cost of the original bound as well. */
4868 bound_cost
= force_var_cost (data
, *bound_cst
, NULL
);
4869 if (bound_cost
.cost
== 0)
4870 bound_cost
.cost
= parm_decl_cost (data
, *bound_cst
);
4871 else if (TREE_CODE (*bound_cst
) == INTEGER_CST
)
4872 bound_cost
.cost
= 0;
4873 express_cost
.cost
+= bound_cost
.cost
;
4875 /* Choose the better approach, preferring the eliminated IV. */
4876 if (compare_costs (elim_cost
, express_cost
) <= 0)
4879 depends_on
= depends_on_elim
;
4880 depends_on_elim
= NULL
;
4881 inv_expr_id
= elim_inv_expr_id
;
4885 cost
= express_cost
;
4886 depends_on
= depends_on_express
;
4887 depends_on_express
= NULL
;
4890 inv_expr_id
= express_inv_expr_id
;
4893 set_use_iv_cost (data
, use
, cand
, cost
, depends_on
, bound
, comp
, inv_expr_id
);
4895 if (depends_on_elim
)
4896 BITMAP_FREE (depends_on_elim
);
4897 if (depends_on_express
)
4898 BITMAP_FREE (depends_on_express
);
4900 return !infinite_cost_p (cost
);
4903 /* Determines cost of basing replacement of USE on CAND. Returns false
4904 if USE cannot be based on CAND. */
4907 determine_use_iv_cost (struct ivopts_data
*data
,
4908 struct iv_use
*use
, struct iv_cand
*cand
)
4912 case USE_NONLINEAR_EXPR
:
4913 return determine_use_iv_cost_generic (data
, use
, cand
);
4916 return determine_use_iv_cost_address (data
, use
, cand
);
4919 return determine_use_iv_cost_condition (data
, use
, cand
);
4926 /* Return true if get_computation_cost indicates that autoincrement is
4927 a possibility for the pair of USE and CAND, false otherwise. */
4930 autoinc_possible_for_pair (struct ivopts_data
*data
, struct iv_use
*use
,
4931 struct iv_cand
*cand
)
4937 if (use
->type
!= USE_ADDRESS
)
4940 cost
= get_computation_cost (data
, use
, cand
, true, &depends_on
,
4941 &can_autoinc
, NULL
);
4943 BITMAP_FREE (depends_on
);
4945 return !infinite_cost_p (cost
) && can_autoinc
;
4948 /* Examine IP_ORIGINAL candidates to see if they are incremented next to a
4949 use that allows autoincrement, and set their AINC_USE if possible. */
4952 set_autoinc_for_original_candidates (struct ivopts_data
*data
)
4956 for (i
= 0; i
< n_iv_cands (data
); i
++)
4958 struct iv_cand
*cand
= iv_cand (data
, i
);
4959 struct iv_use
*closest_before
= NULL
;
4960 struct iv_use
*closest_after
= NULL
;
4961 if (cand
->pos
!= IP_ORIGINAL
)
4964 for (j
= 0; j
< n_iv_uses (data
); j
++)
4966 struct iv_use
*use
= iv_use (data
, j
);
4967 unsigned uid
= gimple_uid (use
->stmt
);
4969 if (gimple_bb (use
->stmt
) != gimple_bb (cand
->incremented_at
))
4972 if (uid
< gimple_uid (cand
->incremented_at
)
4973 && (closest_before
== NULL
4974 || uid
> gimple_uid (closest_before
->stmt
)))
4975 closest_before
= use
;
4977 if (uid
> gimple_uid (cand
->incremented_at
)
4978 && (closest_after
== NULL
4979 || uid
< gimple_uid (closest_after
->stmt
)))
4980 closest_after
= use
;
4983 if (closest_before
!= NULL
4984 && autoinc_possible_for_pair (data
, closest_before
, cand
))
4985 cand
->ainc_use
= closest_before
;
4986 else if (closest_after
!= NULL
4987 && autoinc_possible_for_pair (data
, closest_after
, cand
))
4988 cand
->ainc_use
= closest_after
;
/* Finds the candidates for the induction variables.  */

static void
find_iv_candidates (struct ivopts_data *data)
{
  /* Add commonly used ivs.  */
  add_standard_iv_candidates (data);

  /* Add old induction variables.  */
  add_old_ivs_candidates (data);

  /* Add induction variables derived from uses.  */
  add_derived_ivs_candidates (data);

  set_autoinc_for_original_candidates (data);

  /* Record the important candidates.  */
  record_important_candidates (data);
}
5012 /* Determines costs of basing the use of the iv on an iv candidate. */
5015 determine_use_iv_costs (struct ivopts_data
*data
)
5019 struct iv_cand
*cand
;
5020 bitmap to_clear
= BITMAP_ALLOC (NULL
);
5022 alloc_use_cost_map (data
);
5024 for (i
= 0; i
< n_iv_uses (data
); i
++)
5026 use
= iv_use (data
, i
);
5028 if (data
->consider_all_candidates
)
5030 for (j
= 0; j
< n_iv_cands (data
); j
++)
5032 cand
= iv_cand (data
, j
);
5033 determine_use_iv_cost (data
, use
, cand
);
5040 EXECUTE_IF_SET_IN_BITMAP (use
->related_cands
, 0, j
, bi
)
5042 cand
= iv_cand (data
, j
);
5043 if (!determine_use_iv_cost (data
, use
, cand
))
5044 bitmap_set_bit (to_clear
, j
);
5047 /* Remove the candidates for that the cost is infinite from
5048 the list of related candidates. */
5049 bitmap_and_compl_into (use
->related_cands
, to_clear
);
5050 bitmap_clear (to_clear
);
5054 BITMAP_FREE (to_clear
);
5056 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5058 fprintf (dump_file
, "Use-candidate costs:\n");
5060 for (i
= 0; i
< n_iv_uses (data
); i
++)
5062 use
= iv_use (data
, i
);
5064 fprintf (dump_file
, "Use %d:\n", i
);
5065 fprintf (dump_file
, " cand\tcost\tcompl.\tdepends on\n");
5066 for (j
= 0; j
< use
->n_map_members
; j
++)
5068 if (!use
->cost_map
[j
].cand
5069 || infinite_cost_p (use
->cost_map
[j
].cost
))
5072 fprintf (dump_file
, " %d\t%d\t%d\t",
5073 use
->cost_map
[j
].cand
->id
,
5074 use
->cost_map
[j
].cost
.cost
,
5075 use
->cost_map
[j
].cost
.complexity
);
5076 if (use
->cost_map
[j
].depends_on
)
5077 bitmap_print (dump_file
,
5078 use
->cost_map
[j
].depends_on
, "","");
5079 if (use
->cost_map
[j
].inv_expr_id
!= -1)
5080 fprintf (dump_file
, " inv_expr:%d", use
->cost_map
[j
].inv_expr_id
);
5081 fprintf (dump_file
, "\n");
5084 fprintf (dump_file
, "\n");
5086 fprintf (dump_file
, "\n");
5090 /* Determines cost of the candidate CAND. */
5093 determine_iv_cost (struct ivopts_data
*data
, struct iv_cand
*cand
)
5095 comp_cost cost_base
;
5096 unsigned cost
, cost_step
;
5105 /* There are two costs associated with the candidate -- its increment
5106 and its initialization. The second is almost negligible for any loop
5107 that rolls enough, so we take it just very little into account. */
5109 base
= cand
->iv
->base
;
5110 cost_base
= force_var_cost (data
, base
, NULL
);
5111 /* It will be exceptional that the iv register happens to be initialized with
5112 the proper value at no cost. In general, there will at least be a regcopy
5114 if (cost_base
.cost
== 0)
5115 cost_base
.cost
= COSTS_N_INSNS (1);
5116 cost_step
= add_cost (data
->speed
, TYPE_MODE (TREE_TYPE (base
)));
5118 cost
= cost_step
+ adjust_setup_cost (data
, cost_base
.cost
);
5120 /* Prefer the original ivs unless we may gain something by replacing it.
5121 The reason is to make debugging simpler; so this is not relevant for
5122 artificial ivs created by other optimization passes. */
5123 if (cand
->pos
!= IP_ORIGINAL
5124 || !SSA_NAME_VAR (cand
->var_before
)
5125 || DECL_ARTIFICIAL (SSA_NAME_VAR (cand
->var_before
)))
5128 /* Prefer not to insert statements into latch unless there are some
5129 already (so that we do not create unnecessary jumps). */
5130 if (cand
->pos
== IP_END
5131 && empty_block_p (ip_end_pos (data
->current_loop
)))
5135 cand
->cost_step
= cost_step
;
5138 /* Determines costs of computation of the candidates. */
5141 determine_iv_costs (struct ivopts_data
*data
)
5145 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5147 fprintf (dump_file
, "Candidate costs:\n");
5148 fprintf (dump_file
, " cand\tcost\n");
5151 for (i
= 0; i
< n_iv_cands (data
); i
++)
5153 struct iv_cand
*cand
= iv_cand (data
, i
);
5155 determine_iv_cost (data
, cand
);
5157 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5158 fprintf (dump_file
, " %d\t%d\n", i
, cand
->cost
);
5161 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5162 fprintf (dump_file
, "\n");
5165 /* Calculates cost for having SIZE induction variables. */
5168 ivopts_global_cost_for_size (struct ivopts_data
*data
, unsigned size
)
5170 /* We add size to the cost, so that we prefer eliminating ivs
5172 return size
+ estimate_reg_pressure_cost (size
, data
->regs_used
, data
->speed
,
5173 data
->body_includes_call
);
5176 /* For each size of the induction variable set determine the penalty. */
5179 determine_set_costs (struct ivopts_data
*data
)
5185 struct loop
*loop
= data
->current_loop
;
5188 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5190 fprintf (dump_file
, "Global costs:\n");
5191 fprintf (dump_file
, " target_avail_regs %d\n", target_avail_regs
);
5192 fprintf (dump_file
, " target_clobbered_regs %d\n", target_clobbered_regs
);
5193 fprintf (dump_file
, " target_reg_cost %d\n", target_reg_cost
[data
->speed
]);
5194 fprintf (dump_file
, " target_spill_cost %d\n", target_spill_cost
[data
->speed
]);
5198 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
5201 op
= PHI_RESULT (phi
);
5203 if (virtual_operand_p (op
))
5206 if (get_iv (data
, op
))
5212 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, j
, bi
)
5214 struct version_info
*info
= ver_info (data
, j
);
5216 if (info
->inv_id
&& info
->has_nonlin_use
)
5220 data
->regs_used
= n
;
5221 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5222 fprintf (dump_file
, " regs_used %d\n", n
);
5224 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5226 fprintf (dump_file
, " cost for size:\n");
5227 fprintf (dump_file
, " ivs\tcost\n");
5228 for (j
= 0; j
<= 2 * target_avail_regs
; j
++)
5229 fprintf (dump_file
, " %d\t%d\n", j
,
5230 ivopts_global_cost_for_size (data
, j
));
5231 fprintf (dump_file
, "\n");
5235 /* Returns true if A is a cheaper cost pair than B. */
5238 cheaper_cost_pair (struct cost_pair
*a
, struct cost_pair
*b
)
5248 cmp
= compare_costs (a
->cost
, b
->cost
);
5255 /* In case the costs are the same, prefer the cheaper candidate. */
5256 if (a
->cand
->cost
< b
->cand
->cost
)
5263 /* Returns candidate by that USE is expressed in IVS. */
5265 static struct cost_pair
*
5266 iv_ca_cand_for_use (struct iv_ca
*ivs
, struct iv_use
*use
)
5268 return ivs
->cand_for_use
[use
->id
];
5271 /* Computes the cost field of IVS structure. */
5274 iv_ca_recount_cost (struct ivopts_data
*data
, struct iv_ca
*ivs
)
5276 comp_cost cost
= ivs
->cand_use_cost
;
5278 cost
.cost
+= ivs
->cand_cost
;
5280 cost
.cost
+= ivopts_global_cost_for_size (data
,
5281 ivs
->n_regs
+ ivs
->num_used_inv_expr
);
5286 /* Remove invariants in set INVS to set IVS. */
5289 iv_ca_set_remove_invariants (struct iv_ca
*ivs
, bitmap invs
)
5297 EXECUTE_IF_SET_IN_BITMAP (invs
, 0, iid
, bi
)
5299 ivs
->n_invariant_uses
[iid
]--;
5300 if (ivs
->n_invariant_uses
[iid
] == 0)
5305 /* Set USE not to be expressed by any candidate in IVS. */
5308 iv_ca_set_no_cp (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5311 unsigned uid
= use
->id
, cid
;
5312 struct cost_pair
*cp
;
5314 cp
= ivs
->cand_for_use
[uid
];
5320 ivs
->cand_for_use
[uid
] = NULL
;
5321 ivs
->n_cand_uses
[cid
]--;
5323 if (ivs
->n_cand_uses
[cid
] == 0)
5325 bitmap_clear_bit (ivs
->cands
, cid
);
5326 /* Do not count the pseudocandidates. */
5330 ivs
->cand_cost
-= cp
->cand
->cost
;
5332 iv_ca_set_remove_invariants (ivs
, cp
->cand
->depends_on
);
5335 ivs
->cand_use_cost
= sub_costs (ivs
->cand_use_cost
, cp
->cost
);
5337 iv_ca_set_remove_invariants (ivs
, cp
->depends_on
);
5339 if (cp
->inv_expr_id
!= -1)
5341 ivs
->used_inv_expr
[cp
->inv_expr_id
]--;
5342 if (ivs
->used_inv_expr
[cp
->inv_expr_id
] == 0)
5343 ivs
->num_used_inv_expr
--;
5345 iv_ca_recount_cost (data
, ivs
);
5348 /* Add invariants in set INVS to set IVS. */
5351 iv_ca_set_add_invariants (struct iv_ca
*ivs
, bitmap invs
)
5359 EXECUTE_IF_SET_IN_BITMAP (invs
, 0, iid
, bi
)
5361 ivs
->n_invariant_uses
[iid
]++;
5362 if (ivs
->n_invariant_uses
[iid
] == 1)
5367 /* Set cost pair for USE in set IVS to CP. */
5370 iv_ca_set_cp (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5371 struct iv_use
*use
, struct cost_pair
*cp
)
5373 unsigned uid
= use
->id
, cid
;
5375 if (ivs
->cand_for_use
[uid
] == cp
)
5378 if (ivs
->cand_for_use
[uid
])
5379 iv_ca_set_no_cp (data
, ivs
, use
);
5386 ivs
->cand_for_use
[uid
] = cp
;
5387 ivs
->n_cand_uses
[cid
]++;
5388 if (ivs
->n_cand_uses
[cid
] == 1)
5390 bitmap_set_bit (ivs
->cands
, cid
);
5391 /* Do not count the pseudocandidates. */
5395 ivs
->cand_cost
+= cp
->cand
->cost
;
5397 iv_ca_set_add_invariants (ivs
, cp
->cand
->depends_on
);
5400 ivs
->cand_use_cost
= add_costs (ivs
->cand_use_cost
, cp
->cost
);
5401 iv_ca_set_add_invariants (ivs
, cp
->depends_on
);
5403 if (cp
->inv_expr_id
!= -1)
5405 ivs
->used_inv_expr
[cp
->inv_expr_id
]++;
5406 if (ivs
->used_inv_expr
[cp
->inv_expr_id
] == 1)
5407 ivs
->num_used_inv_expr
++;
5409 iv_ca_recount_cost (data
, ivs
);
5413 /* Extend set IVS by expressing USE by some of the candidates in it
5414 if possible. Consider all important candidates if candidates in
5415 set IVS don't give any result. */
5418 iv_ca_add_use (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5421 struct cost_pair
*best_cp
= NULL
, *cp
;
5424 struct iv_cand
*cand
;
5426 gcc_assert (ivs
->upto
>= use
->id
);
5430 EXECUTE_IF_SET_IN_BITMAP (ivs
->cands
, 0, i
, bi
)
5432 cand
= iv_cand (data
, i
);
5433 cp
= get_use_iv_cost (data
, use
, cand
);
5434 if (cheaper_cost_pair (cp
, best_cp
))
5438 if (best_cp
== NULL
)
5440 EXECUTE_IF_SET_IN_BITMAP (data
->important_candidates
, 0, i
, bi
)
5442 cand
= iv_cand (data
, i
);
5443 cp
= get_use_iv_cost (data
, use
, cand
);
5444 if (cheaper_cost_pair (cp
, best_cp
))
5449 iv_ca_set_cp (data
, ivs
, use
, best_cp
);
5452 /* Get cost for assignment IVS. */
5455 iv_ca_cost (struct iv_ca
*ivs
)
5457 /* This was a conditional expression but it triggered a bug in
5460 return infinite_cost
;
5465 /* Returns true if all dependences of CP are among invariants in IVS. */
5468 iv_ca_has_deps (struct iv_ca
*ivs
, struct cost_pair
*cp
)
5473 if (!cp
->depends_on
)
5476 EXECUTE_IF_SET_IN_BITMAP (cp
->depends_on
, 0, i
, bi
)
5478 if (ivs
->n_invariant_uses
[i
] == 0)
5485 /* Creates change of expressing USE by NEW_CP instead of OLD_CP and chains
5486 it before NEXT_CHANGE. */
5488 static struct iv_ca_delta
*
5489 iv_ca_delta_add (struct iv_use
*use
, struct cost_pair
*old_cp
,
5490 struct cost_pair
*new_cp
, struct iv_ca_delta
*next_change
)
5492 struct iv_ca_delta
*change
= XNEW (struct iv_ca_delta
);
5495 change
->old_cp
= old_cp
;
5496 change
->new_cp
= new_cp
;
5497 change
->next_change
= next_change
;
5502 /* Joins two lists of changes L1 and L2. Destructive -- old lists
5505 static struct iv_ca_delta
*
5506 iv_ca_delta_join (struct iv_ca_delta
*l1
, struct iv_ca_delta
*l2
)
5508 struct iv_ca_delta
*last
;
5516 for (last
= l1
; last
->next_change
; last
= last
->next_change
)
5518 last
->next_change
= l2
;
5523 /* Reverse the list of changes DELTA, forming the inverse to it. */
5525 static struct iv_ca_delta
*
5526 iv_ca_delta_reverse (struct iv_ca_delta
*delta
)
5528 struct iv_ca_delta
*act
, *next
, *prev
= NULL
;
5529 struct cost_pair
*tmp
;
5531 for (act
= delta
; act
; act
= next
)
5533 next
= act
->next_change
;
5534 act
->next_change
= prev
;
5538 act
->old_cp
= act
->new_cp
;
5545 /* Commit changes in DELTA to IVS. If FORWARD is false, the changes are
5546 reverted instead. */
5549 iv_ca_delta_commit (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5550 struct iv_ca_delta
*delta
, bool forward
)
5552 struct cost_pair
*from
, *to
;
5553 struct iv_ca_delta
*act
;
5556 delta
= iv_ca_delta_reverse (delta
);
5558 for (act
= delta
; act
; act
= act
->next_change
)
5562 gcc_assert (iv_ca_cand_for_use (ivs
, act
->use
) == from
);
5563 iv_ca_set_cp (data
, ivs
, act
->use
, to
);
5567 iv_ca_delta_reverse (delta
);
5570 /* Returns true if CAND is used in IVS. */
5573 iv_ca_cand_used_p (struct iv_ca
*ivs
, struct iv_cand
*cand
)
5575 return ivs
->n_cand_uses
[cand
->id
] > 0;
5578 /* Returns number of induction variable candidates in the set IVS. */
5581 iv_ca_n_cands (struct iv_ca
*ivs
)
5583 return ivs
->n_cands
;
5586 /* Free the list of changes DELTA. */
5589 iv_ca_delta_free (struct iv_ca_delta
**delta
)
5591 struct iv_ca_delta
*act
, *next
;
5593 for (act
= *delta
; act
; act
= next
)
5595 next
= act
->next_change
;
5602 /* Allocates new iv candidates assignment. */
5604 static struct iv_ca
*
5605 iv_ca_new (struct ivopts_data
*data
)
5607 struct iv_ca
*nw
= XNEW (struct iv_ca
);
5611 nw
->cand_for_use
= XCNEWVEC (struct cost_pair
*, n_iv_uses (data
));
5612 nw
->n_cand_uses
= XCNEWVEC (unsigned, n_iv_cands (data
));
5613 nw
->cands
= BITMAP_ALLOC (NULL
);
5616 nw
->cand_use_cost
= no_cost
;
5618 nw
->n_invariant_uses
= XCNEWVEC (unsigned, data
->max_inv_id
+ 1);
5620 nw
->used_inv_expr
= XCNEWVEC (unsigned, data
->inv_expr_id
+ 1);
5621 nw
->num_used_inv_expr
= 0;
5626 /* Free memory occupied by the set IVS. */
5629 iv_ca_free (struct iv_ca
**ivs
)
5631 free ((*ivs
)->cand_for_use
);
5632 free ((*ivs
)->n_cand_uses
);
5633 BITMAP_FREE ((*ivs
)->cands
);
5634 free ((*ivs
)->n_invariant_uses
);
5635 free ((*ivs
)->used_inv_expr
);
5640 /* Dumps IVS to FILE. */
5643 iv_ca_dump (struct ivopts_data
*data
, FILE *file
, struct iv_ca
*ivs
)
5645 const char *pref
= " invariants ";
5647 comp_cost cost
= iv_ca_cost (ivs
);
5649 fprintf (file
, " cost: %d (complexity %d)\n", cost
.cost
, cost
.complexity
);
5650 fprintf (file
, " cand_cost: %d\n cand_use_cost: %d (complexity %d)\n",
5651 ivs
->cand_cost
, ivs
->cand_use_cost
.cost
, ivs
->cand_use_cost
.complexity
);
5652 bitmap_print (file
, ivs
->cands
, " candidates: ","\n");
5654 for (i
= 0; i
< ivs
->upto
; i
++)
5656 struct iv_use
*use
= iv_use (data
, i
);
5657 struct cost_pair
*cp
= iv_ca_cand_for_use (ivs
, use
);
5659 fprintf (file
, " use:%d --> iv_cand:%d, cost=(%d,%d)\n",
5660 use
->id
, cp
->cand
->id
, cp
->cost
.cost
, cp
->cost
.complexity
);
5662 fprintf (file
, " use:%d --> ??\n", use
->id
);
5665 for (i
= 1; i
<= data
->max_inv_id
; i
++)
5666 if (ivs
->n_invariant_uses
[i
])
5668 fprintf (file
, "%s%d", pref
, i
);
5671 fprintf (file
, "\n\n");
5674 /* Try changing candidate in IVS to CAND for each use. Return cost of the
5675 new set, and store differences in DELTA. Number of induction variables
5676 in the new set is stored to N_IVS. MIN_NCAND is a flag. When it is true
5677 the function will try to find a solution with mimimal iv candidates. */
5680 iv_ca_extend (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5681 struct iv_cand
*cand
, struct iv_ca_delta
**delta
,
5682 unsigned *n_ivs
, bool min_ncand
)
5687 struct cost_pair
*old_cp
, *new_cp
;
5690 for (i
= 0; i
< ivs
->upto
; i
++)
5692 use
= iv_use (data
, i
);
5693 old_cp
= iv_ca_cand_for_use (ivs
, use
);
5696 && old_cp
->cand
== cand
)
5699 new_cp
= get_use_iv_cost (data
, use
, cand
);
5703 if (!min_ncand
&& !iv_ca_has_deps (ivs
, new_cp
))
5706 if (!min_ncand
&& !cheaper_cost_pair (new_cp
, old_cp
))
5709 *delta
= iv_ca_delta_add (use
, old_cp
, new_cp
, *delta
);
5712 iv_ca_delta_commit (data
, ivs
, *delta
, true);
5713 cost
= iv_ca_cost (ivs
);
5715 *n_ivs
= iv_ca_n_cands (ivs
);
5716 iv_ca_delta_commit (data
, ivs
, *delta
, false);
5721 /* Try narrowing set IVS by removing CAND. Return the cost of
5722 the new set and store the differences in DELTA. START is
5723 the candidate with which we start narrowing. */
5726 iv_ca_narrow (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5727 struct iv_cand
*cand
, struct iv_cand
*start
,
5728 struct iv_ca_delta
**delta
)
5732 struct cost_pair
*old_cp
, *new_cp
, *cp
;
5734 struct iv_cand
*cnd
;
5735 comp_cost cost
, best_cost
, acost
;
5738 for (i
= 0; i
< n_iv_uses (data
); i
++)
5740 use
= iv_use (data
, i
);
5742 old_cp
= iv_ca_cand_for_use (ivs
, use
);
5743 if (old_cp
->cand
!= cand
)
5746 best_cost
= iv_ca_cost (ivs
);
5747 /* Start narrowing with START. */
5748 new_cp
= get_use_iv_cost (data
, use
, start
);
5750 if (data
->consider_all_candidates
)
5752 EXECUTE_IF_SET_IN_BITMAP (ivs
->cands
, 0, ci
, bi
)
5754 if (ci
== cand
->id
|| (start
&& ci
== start
->id
))
5757 cnd
= iv_cand (data
, ci
);
5759 cp
= get_use_iv_cost (data
, use
, cnd
);
5763 iv_ca_set_cp (data
, ivs
, use
, cp
);
5764 acost
= iv_ca_cost (ivs
);
5766 if (compare_costs (acost
, best_cost
) < 0)
5775 EXECUTE_IF_AND_IN_BITMAP (use
->related_cands
, ivs
->cands
, 0, ci
, bi
)
5777 if (ci
== cand
->id
|| (start
&& ci
== start
->id
))
5780 cnd
= iv_cand (data
, ci
);
5782 cp
= get_use_iv_cost (data
, use
, cnd
);
5786 iv_ca_set_cp (data
, ivs
, use
, cp
);
5787 acost
= iv_ca_cost (ivs
);
5789 if (compare_costs (acost
, best_cost
) < 0)
5796 /* Restore to old cp for use. */
5797 iv_ca_set_cp (data
, ivs
, use
, old_cp
);
5801 iv_ca_delta_free (delta
);
5802 return infinite_cost
;
5805 *delta
= iv_ca_delta_add (use
, old_cp
, new_cp
, *delta
);
5808 iv_ca_delta_commit (data
, ivs
, *delta
, true);
5809 cost
= iv_ca_cost (ivs
);
5810 iv_ca_delta_commit (data
, ivs
, *delta
, false);
5815 /* Try optimizing the set of candidates IVS by removing candidates different
5816 from to EXCEPT_CAND from it. Return cost of the new set, and store
5817 differences in DELTA. */
5820 iv_ca_prune (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5821 struct iv_cand
*except_cand
, struct iv_ca_delta
**delta
)
5824 struct iv_ca_delta
*act_delta
, *best_delta
;
5826 comp_cost best_cost
, acost
;
5827 struct iv_cand
*cand
;
5830 best_cost
= iv_ca_cost (ivs
);
5832 EXECUTE_IF_SET_IN_BITMAP (ivs
->cands
, 0, i
, bi
)
5834 cand
= iv_cand (data
, i
);
5836 if (cand
== except_cand
)
5839 acost
= iv_ca_narrow (data
, ivs
, cand
, except_cand
, &act_delta
);
5841 if (compare_costs (acost
, best_cost
) < 0)
5844 iv_ca_delta_free (&best_delta
);
5845 best_delta
= act_delta
;
5848 iv_ca_delta_free (&act_delta
);
5857 /* Recurse to possibly remove other unnecessary ivs. */
5858 iv_ca_delta_commit (data
, ivs
, best_delta
, true);
5859 best_cost
= iv_ca_prune (data
, ivs
, except_cand
, delta
);
5860 iv_ca_delta_commit (data
, ivs
, best_delta
, false);
5861 *delta
= iv_ca_delta_join (best_delta
, *delta
);
5865 /* Check if CAND_IDX is a candidate other than OLD_CAND and has
5866 cheaper local cost for USE than BEST_CP. Return pointer to
5867 the corresponding cost_pair, otherwise just return BEST_CP. */
5869 static struct cost_pair
*
5870 cheaper_cost_with_cand (struct ivopts_data
*data
, struct iv_use
*use
,
5871 unsigned int cand_idx
, struct iv_cand
*old_cand
,
5872 struct cost_pair
*best_cp
)
5874 struct iv_cand
*cand
;
5875 struct cost_pair
*cp
;
5877 gcc_assert (old_cand
!= NULL
&& best_cp
!= NULL
);
5878 if (cand_idx
== old_cand
->id
)
5881 cand
= iv_cand (data
, cand_idx
);
5882 cp
= get_use_iv_cost (data
, use
, cand
);
5883 if (cp
!= NULL
&& cheaper_cost_pair (cp
, best_cp
))
5889 /* Try breaking local optimal fixed-point for IVS by replacing candidates
5890 which are used by more than one iv uses. For each of those candidates,
5891 this function tries to represent iv uses under that candidate using
5892 other ones with lower local cost, then tries to prune the new set.
5893 If the new set has lower cost, It returns the new cost after recording
5894 candidate replacement in list DELTA. */
5897 iv_ca_replace (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5898 struct iv_ca_delta
**delta
)
5900 bitmap_iterator bi
, bj
;
5901 unsigned int i
, j
, k
;
5903 struct iv_cand
*cand
;
5904 comp_cost orig_cost
, acost
;
5905 struct iv_ca_delta
*act_delta
, *tmp_delta
;
5906 struct cost_pair
*old_cp
, *best_cp
= NULL
;
5909 orig_cost
= iv_ca_cost (ivs
);
5911 EXECUTE_IF_SET_IN_BITMAP (ivs
->cands
, 0, i
, bi
)
5913 if (ivs
->n_cand_uses
[i
] == 1
5914 || ivs
->n_cand_uses
[i
] > ALWAYS_PRUNE_CAND_SET_BOUND
)
5917 cand
= iv_cand (data
, i
);
5920 /* Represent uses under current candidate using other ones with
5921 lower local cost. */
5922 for (j
= 0; j
< ivs
->upto
; j
++)
5924 use
= iv_use (data
, j
);
5925 old_cp
= iv_ca_cand_for_use (ivs
, use
);
5927 if (old_cp
->cand
!= cand
)
5931 if (data
->consider_all_candidates
)
5932 for (k
= 0; k
< n_iv_cands (data
); k
++)
5933 best_cp
= cheaper_cost_with_cand (data
, use
, k
,
5934 old_cp
->cand
, best_cp
);
5936 EXECUTE_IF_SET_IN_BITMAP (use
->related_cands
, 0, k
, bj
)
5937 best_cp
= cheaper_cost_with_cand (data
, use
, k
,
5938 old_cp
->cand
, best_cp
);
5940 if (best_cp
== old_cp
)
5943 act_delta
= iv_ca_delta_add (use
, old_cp
, best_cp
, act_delta
);
5945 /* No need for further prune. */
5949 /* Prune the new candidate set. */
5950 iv_ca_delta_commit (data
, ivs
, act_delta
, true);
5951 acost
= iv_ca_prune (data
, ivs
, NULL
, &tmp_delta
);
5952 iv_ca_delta_commit (data
, ivs
, act_delta
, false);
5953 act_delta
= iv_ca_delta_join (act_delta
, tmp_delta
);
5955 if (compare_costs (acost
, orig_cost
) < 0)
5961 iv_ca_delta_free (&act_delta
);
5967 /* Tries to extend the sets IVS in the best possible way in order
5968 to express the USE. If ORIGINALP is true, prefer candidates from
5969 the original set of IVs, otherwise favor important candidates not
5970 based on any memory object. */
5973 try_add_cand_for (struct ivopts_data
*data
, struct iv_ca
*ivs
,
5974 struct iv_use
*use
, bool originalp
)
5976 comp_cost best_cost
, act_cost
;
5979 struct iv_cand
*cand
;
5980 struct iv_ca_delta
*best_delta
= NULL
, *act_delta
;
5981 struct cost_pair
*cp
;
5983 iv_ca_add_use (data
, ivs
, use
);
5984 best_cost
= iv_ca_cost (ivs
);
5985 cp
= iv_ca_cand_for_use (ivs
, use
);
5988 best_delta
= iv_ca_delta_add (use
, NULL
, cp
, NULL
);
5989 iv_ca_set_no_cp (data
, ivs
, use
);
5992 /* If ORIGINALP is true, try to find the original IV for the use. Otherwise
5993 first try important candidates not based on any memory object. Only if
5994 this fails, try the specific ones. Rationale -- in loops with many
5995 variables the best choice often is to use just one generic biv. If we
5996 added here many ivs specific to the uses, the optimization algorithm later
5997 would be likely to get stuck in a local minimum, thus causing us to create
5998 too many ivs. The approach from few ivs to more seems more likely to be
5999 successful -- starting from few ivs, replacing an expensive use by a
6000 specific iv should always be a win. */
6001 EXECUTE_IF_SET_IN_BITMAP (data
->important_candidates
, 0, i
, bi
)
6003 cand
= iv_cand (data
, i
);
6005 if (originalp
&& cand
->pos
!=IP_ORIGINAL
)
6008 if (!originalp
&& cand
->iv
->base_object
!= NULL_TREE
)
6011 if (iv_ca_cand_used_p (ivs
, cand
))
6014 cp
= get_use_iv_cost (data
, use
, cand
);
6018 iv_ca_set_cp (data
, ivs
, use
, cp
);
6019 act_cost
= iv_ca_extend (data
, ivs
, cand
, &act_delta
, NULL
,
6021 iv_ca_set_no_cp (data
, ivs
, use
);
6022 act_delta
= iv_ca_delta_add (use
, NULL
, cp
, act_delta
);
6024 if (compare_costs (act_cost
, best_cost
) < 0)
6026 best_cost
= act_cost
;
6028 iv_ca_delta_free (&best_delta
);
6029 best_delta
= act_delta
;
6032 iv_ca_delta_free (&act_delta
);
6035 if (infinite_cost_p (best_cost
))
6037 for (i
= 0; i
< use
->n_map_members
; i
++)
6039 cp
= use
->cost_map
+ i
;
6044 /* Already tried this. */
6045 if (cand
->important
)
6047 if (originalp
&& cand
->pos
== IP_ORIGINAL
)
6049 if (!originalp
&& cand
->iv
->base_object
== NULL_TREE
)
6053 if (iv_ca_cand_used_p (ivs
, cand
))
6057 iv_ca_set_cp (data
, ivs
, use
, cp
);
6058 act_cost
= iv_ca_extend (data
, ivs
, cand
, &act_delta
, NULL
, true);
6059 iv_ca_set_no_cp (data
, ivs
, use
);
6060 act_delta
= iv_ca_delta_add (use
, iv_ca_cand_for_use (ivs
, use
),
6063 if (compare_costs (act_cost
, best_cost
) < 0)
6065 best_cost
= act_cost
;
6068 iv_ca_delta_free (&best_delta
);
6069 best_delta
= act_delta
;
6072 iv_ca_delta_free (&act_delta
);
6076 iv_ca_delta_commit (data
, ivs
, best_delta
, true);
6077 iv_ca_delta_free (&best_delta
);
6079 return !infinite_cost_p (best_cost
);
6082 /* Finds an initial assignment of candidates to uses. */
6084 static struct iv_ca
*
6085 get_initial_solution (struct ivopts_data
*data
, bool originalp
)
6087 struct iv_ca
*ivs
= iv_ca_new (data
);
6090 for (i
= 0; i
< n_iv_uses (data
); i
++)
6091 if (!try_add_cand_for (data
, ivs
, iv_use (data
, i
), originalp
))
6100 /* Tries to improve set of induction variables IVS. TRY_REPLACE_P
6101 points to a bool variable, this function tries to break local
6102 optimal fixed-point by replacing candidates in IVS if it's true. */
6105 try_improve_iv_set (struct ivopts_data
*data
,
6106 struct iv_ca
*ivs
, bool *try_replace_p
)
6109 comp_cost acost
, best_cost
= iv_ca_cost (ivs
);
6110 struct iv_ca_delta
*best_delta
= NULL
, *act_delta
, *tmp_delta
;
6111 struct iv_cand
*cand
;
6113 /* Try extending the set of induction variables by one. */
6114 for (i
= 0; i
< n_iv_cands (data
); i
++)
6116 cand
= iv_cand (data
, i
);
6118 if (iv_ca_cand_used_p (ivs
, cand
))
6121 acost
= iv_ca_extend (data
, ivs
, cand
, &act_delta
, &n_ivs
, false);
6125 /* If we successfully added the candidate and the set is small enough,
6126 try optimizing it by removing other candidates. */
6127 if (n_ivs
<= ALWAYS_PRUNE_CAND_SET_BOUND
)
6129 iv_ca_delta_commit (data
, ivs
, act_delta
, true);
6130 acost
= iv_ca_prune (data
, ivs
, cand
, &tmp_delta
);
6131 iv_ca_delta_commit (data
, ivs
, act_delta
, false);
6132 act_delta
= iv_ca_delta_join (act_delta
, tmp_delta
);
6135 if (compare_costs (acost
, best_cost
) < 0)
6138 iv_ca_delta_free (&best_delta
);
6139 best_delta
= act_delta
;
6142 iv_ca_delta_free (&act_delta
);
6147 /* Try removing the candidates from the set instead. */
6148 best_cost
= iv_ca_prune (data
, ivs
, NULL
, &best_delta
);
6150 if (!best_delta
&& *try_replace_p
)
6152 *try_replace_p
= false;
6153 /* So far candidate selecting algorithm tends to choose fewer IVs
6154 so that it can handle cases in which loops have many variables
6155 but the best choice is often to use only one general biv. One
6156 weakness is it can't handle opposite cases, in which different
6157 candidates should be chosen with respect to each use. To solve
6158 the problem, we replace candidates in a manner described by the
6159 comments of iv_ca_replace, thus give general algorithm a chance
6160 to break local optimal fixed-point in these cases. */
6161 best_cost
= iv_ca_replace (data
, ivs
, &best_delta
);
6168 iv_ca_delta_commit (data
, ivs
, best_delta
, true);
6169 gcc_assert (compare_costs (best_cost
, iv_ca_cost (ivs
)) == 0);
6170 iv_ca_delta_free (&best_delta
);
6174 /* Attempts to find the optimal set of induction variables. We do simple
6175 greedy heuristic -- we try to replace at most one candidate in the selected
6176 solution and remove the unused ivs while this improves the cost. */
6178 static struct iv_ca
*
6179 find_optimal_iv_set_1 (struct ivopts_data
*data
, bool originalp
)
6182 bool try_replace_p
= true;
6184 /* Get the initial solution. */
6185 set
= get_initial_solution (data
, originalp
);
6188 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6189 fprintf (dump_file
, "Unable to substitute for ivs, failed.\n");
6193 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6195 fprintf (dump_file
, "Initial set of candidates:\n");
6196 iv_ca_dump (data
, dump_file
, set
);
6199 while (try_improve_iv_set (data
, set
, &try_replace_p
))
6201 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6203 fprintf (dump_file
, "Improved to:\n");
6204 iv_ca_dump (data
, dump_file
, set
);
6211 static struct iv_ca
*
6212 find_optimal_iv_set (struct ivopts_data
*data
)
6215 struct iv_ca
*set
, *origset
;
6217 comp_cost cost
, origcost
;
6219 /* Determine the cost based on a strategy that starts with original IVs,
6220 and try again using a strategy that prefers candidates not based
6222 origset
= find_optimal_iv_set_1 (data
, true);
6223 set
= find_optimal_iv_set_1 (data
, false);
6225 if (!origset
&& !set
)
6228 origcost
= origset
? iv_ca_cost (origset
) : infinite_cost
;
6229 cost
= set
? iv_ca_cost (set
) : infinite_cost
;
6231 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6233 fprintf (dump_file
, "Original cost %d (complexity %d)\n\n",
6234 origcost
.cost
, origcost
.complexity
);
6235 fprintf (dump_file
, "Final cost %d (complexity %d)\n\n",
6236 cost
.cost
, cost
.complexity
);
6239 /* Choose the one with the best cost. */
6240 if (compare_costs (origcost
, cost
) <= 0)
6247 iv_ca_free (&origset
);
6249 for (i
= 0; i
< n_iv_uses (data
); i
++)
6251 use
= iv_use (data
, i
);
6252 use
->selected
= iv_ca_cand_for_use (set
, use
)->cand
;
6258 /* Creates a new induction variable corresponding to CAND. */
6261 create_new_iv (struct ivopts_data
*data
, struct iv_cand
*cand
)
6263 gimple_stmt_iterator incr_pos
;
6273 incr_pos
= gsi_last_bb (ip_normal_pos (data
->current_loop
));
6277 incr_pos
= gsi_last_bb (ip_end_pos (data
->current_loop
));
6285 incr_pos
= gsi_for_stmt (cand
->incremented_at
);
6289 /* Mark that the iv is preserved. */
6290 name_info (data
, cand
->var_before
)->preserve_biv
= true;
6291 name_info (data
, cand
->var_after
)->preserve_biv
= true;
6293 /* Rewrite the increment so that it uses var_before directly. */
6294 find_interesting_uses_op (data
, cand
->var_after
)->selected
= cand
;
6298 gimple_add_tmp_var (cand
->var_before
);
6300 base
= unshare_expr (cand
->iv
->base
);
6302 create_iv (base
, unshare_expr (cand
->iv
->step
),
6303 cand
->var_before
, data
->current_loop
,
6304 &incr_pos
, after
, &cand
->var_before
, &cand
->var_after
);
6307 /* Creates new induction variables described in SET. */
6310 create_new_ivs (struct ivopts_data
*data
, struct iv_ca
*set
)
6313 struct iv_cand
*cand
;
6316 EXECUTE_IF_SET_IN_BITMAP (set
->cands
, 0, i
, bi
)
6318 cand
= iv_cand (data
, i
);
6319 create_new_iv (data
, cand
);
6322 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6324 fprintf (dump_file
, "\nSelected IV set: \n");
6325 EXECUTE_IF_SET_IN_BITMAP (set
->cands
, 0, i
, bi
)
6327 cand
= iv_cand (data
, i
);
6328 dump_cand (dump_file
, cand
);
6330 fprintf (dump_file
, "\n");
6334 /* Rewrites USE (definition of iv used in a nonlinear expression)
6335 using candidate CAND. */
6338 rewrite_use_nonlinear_expr (struct ivopts_data
*data
,
6339 struct iv_use
*use
, struct iv_cand
*cand
)
6344 gimple_stmt_iterator bsi
;
6346 /* An important special case -- if we are asked to express value of
6347 the original iv by itself, just exit; there is no need to
6348 introduce a new computation (that might also need casting the
6349 variable to unsigned and back). */
6350 if (cand
->pos
== IP_ORIGINAL
6351 && cand
->incremented_at
== use
->stmt
)
6353 enum tree_code stmt_code
;
6355 gcc_assert (is_gimple_assign (use
->stmt
));
6356 gcc_assert (gimple_assign_lhs (use
->stmt
) == cand
->var_after
);
6358 /* Check whether we may leave the computation unchanged.
6359 This is the case only if it does not rely on other
6360 computations in the loop -- otherwise, the computation
6361 we rely upon may be removed in remove_unused_ivs,
6362 thus leading to ICE. */
6363 stmt_code
= gimple_assign_rhs_code (use
->stmt
);
6364 if (stmt_code
== PLUS_EXPR
6365 || stmt_code
== MINUS_EXPR
6366 || stmt_code
== POINTER_PLUS_EXPR
)
6368 if (gimple_assign_rhs1 (use
->stmt
) == cand
->var_before
)
6369 op
= gimple_assign_rhs2 (use
->stmt
);
6370 else if (gimple_assign_rhs2 (use
->stmt
) == cand
->var_before
)
6371 op
= gimple_assign_rhs1 (use
->stmt
);
6378 if (op
&& expr_invariant_in_loop_p (data
->current_loop
, op
))
6382 comp
= get_computation (data
->current_loop
, use
, cand
);
6383 gcc_assert (comp
!= NULL_TREE
);
6385 switch (gimple_code (use
->stmt
))
6388 tgt
= PHI_RESULT (use
->stmt
);
6390 /* If we should keep the biv, do not replace it. */
6391 if (name_info (data
, tgt
)->preserve_biv
)
6394 bsi
= gsi_after_labels (gimple_bb (use
->stmt
));
6398 tgt
= gimple_assign_lhs (use
->stmt
);
6399 bsi
= gsi_for_stmt (use
->stmt
);
6406 if (!valid_gimple_rhs_p (comp
)
6407 || (gimple_code (use
->stmt
) != GIMPLE_PHI
6408 /* We can't allow re-allocating the stmt as it might be pointed
6410 && (get_gimple_rhs_num_ops (TREE_CODE (comp
))
6411 >= gimple_num_ops (gsi_stmt (bsi
)))))
6413 comp
= force_gimple_operand_gsi (&bsi
, comp
, true, NULL_TREE
,
6414 true, GSI_SAME_STMT
);
6415 if (POINTER_TYPE_P (TREE_TYPE (tgt
)))
6417 duplicate_ssa_name_ptr_info (comp
, SSA_NAME_PTR_INFO (tgt
));
6418 /* As this isn't a plain copy we have to reset alignment
6420 if (SSA_NAME_PTR_INFO (comp
))
6421 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp
));
6425 if (gimple_code (use
->stmt
) == GIMPLE_PHI
)
6427 ass
= gimple_build_assign (tgt
, comp
);
6428 gsi_insert_before (&bsi
, ass
, GSI_SAME_STMT
);
6430 bsi
= gsi_for_stmt (use
->stmt
);
6431 remove_phi_node (&bsi
, false);
6435 gimple_assign_set_rhs_from_tree (&bsi
, comp
);
6436 use
->stmt
= gsi_stmt (bsi
);
6440 /* Performs a peephole optimization to reorder the iv update statement with
6441 a mem ref to enable instruction combining in later phases. The mem ref uses
6442 the iv value before the update, so the reordering transformation requires
6443 adjustment of the offset. CAND is the selected IV_CAND.
6447 t = MEM_REF (base, iv1, 8, 16); // base, index, stride, offset
6455 directly propagating t over to (1) will introduce overlapping live range
6456 thus increase register pressure. This peephole transform it into:
6460 t = MEM_REF (base, iv2, 8, 8);
6467 adjust_iv_update_pos (struct iv_cand
*cand
, struct iv_use
*use
)
6470 gimple iv_update
, stmt
;
6472 gimple_stmt_iterator gsi
, gsi_iv
;
6474 if (cand
->pos
!= IP_NORMAL
)
6477 var_after
= cand
->var_after
;
6478 iv_update
= SSA_NAME_DEF_STMT (var_after
);
6480 bb
= gimple_bb (iv_update
);
6481 gsi
= gsi_last_nondebug_bb (bb
);
6482 stmt
= gsi_stmt (gsi
);
6484 /* Only handle conditional statement for now. */
6485 if (gimple_code (stmt
) != GIMPLE_COND
)
6488 gsi_prev_nondebug (&gsi
);
6489 stmt
= gsi_stmt (gsi
);
6490 if (stmt
!= iv_update
)
6493 gsi_prev_nondebug (&gsi
);
6494 if (gsi_end_p (gsi
))
6497 stmt
= gsi_stmt (gsi
);
6498 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
6501 if (stmt
!= use
->stmt
)
6504 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
6507 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6509 fprintf (dump_file
, "Reordering \n");
6510 print_gimple_stmt (dump_file
, iv_update
, 0, 0);
6511 print_gimple_stmt (dump_file
, use
->stmt
, 0, 0);
6512 fprintf (dump_file
, "\n");
6515 gsi
= gsi_for_stmt (use
->stmt
);
6516 gsi_iv
= gsi_for_stmt (iv_update
);
6517 gsi_move_before (&gsi_iv
, &gsi
);
6519 cand
->pos
= IP_BEFORE_USE
;
6520 cand
->incremented_at
= use
->stmt
;
6523 /* Rewrites USE (address that is an iv) using candidate CAND. */
6526 rewrite_use_address (struct ivopts_data
*data
,
6527 struct iv_use
*use
, struct iv_cand
*cand
)
6530 gimple_stmt_iterator bsi
= gsi_for_stmt (use
->stmt
);
6531 tree base_hint
= NULL_TREE
;
6535 adjust_iv_update_pos (cand
, use
);
6536 ok
= get_computation_aff (data
->current_loop
, use
, cand
, use
->stmt
, &aff
);
6538 unshare_aff_combination (&aff
);
6540 /* To avoid undefined overflow problems, all IV candidates use unsigned
6541 integer types. The drawback is that this makes it impossible for
6542 create_mem_ref to distinguish an IV that is based on a memory object
6543 from one that represents simply an offset.
6545 To work around this problem, we pass a hint to create_mem_ref that
6546 indicates which variable (if any) in aff is an IV based on a memory
6547 object. Note that we only consider the candidate. If this is not
6548 based on an object, the base of the reference is in some subexpression
6549 of the use -- but these will use pointer types, so they are recognized
6550 by the create_mem_ref heuristics anyway. */
6551 if (cand
->iv
->base_object
)
6552 base_hint
= var_at_stmt (data
->current_loop
, cand
, use
->stmt
);
6554 iv
= var_at_stmt (data
->current_loop
, cand
, use
->stmt
);
6555 ref
= create_mem_ref (&bsi
, TREE_TYPE (*use
->op_p
), &aff
,
6556 reference_alias_ptr_type (*use
->op_p
),
6557 iv
, base_hint
, data
->speed
);
6558 copy_ref_info (ref
, *use
->op_p
);
6562 /* Rewrites USE (the condition such that one of the arguments is an iv) using
/* NOTE(review): garbled extraction -- the continuation of the comment above,
   the return type, braces, several declarations (`ok', `stmts') and the
   control flow selecting between the two rewrite strategies are on missing
   lines.  Visible logic: if IV elimination chose a bound (recorded in the
   use's cost pair), replace the exit test with `var COMPARE bound';
   otherwise fall back to expressing the compared value via CAND.  */
6566 rewrite_use_compare (struct ivopts_data
*data
,
6567 struct iv_use
*use
, struct iv_cand
*cand
)
6569 tree comp
, *var_p
, op
, bound
;
6570 gimple_stmt_iterator bsi
= gsi_for_stmt (use
->stmt
);
6571 enum tree_code compare
;
/* Cost pair for (USE, CAND); presumably supplies `bound' and `compare'
   for the elimination path -- the lines reading it are missing here.  */
6572 struct cost_pair
*cp
= get_use_iv_cost (data
, use
, cand
);
6578 tree var
= var_at_stmt (data
->current_loop
, cand
, use
->stmt
);
6579 tree var_type
= TREE_TYPE (var
);
6582 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6584 fprintf (dump_file
, "Replacing exit test: ");
6585 print_gimple_stmt (dump_file
, use
->stmt
, 0, TDF_SLIM
);
/* Convert the bound to the IV's type and gimplify it; any statements
   needed to compute it go on the loop preheader edge, so they execute
   once before the loop.  */
6588 bound
= unshare_expr (fold_convert (var_type
, bound
));
6589 op
= force_gimple_operand (bound
, &stmts
, true, NULL_TREE
);
6591 gsi_insert_seq_on_edge_immediate (
6592 loop_preheader_edge (data
->current_loop
),
/* Rewrite the condition in place: `var COMPARE op'.  */
6595 gcond
*cond_stmt
= as_a
<gcond
*> (use
->stmt
);
6596 gimple_cond_set_lhs (cond_stmt
, var
);
6597 gimple_cond_set_code (cond_stmt
, compare
);
6598 gimple_cond_set_rhs (cond_stmt
, op
);
6602 /* The induction variable elimination failed; just express the original
/* Fallback path: compute the original IV value from CAND and substitute
   it for the condition's IV operand.  */
6604 comp
= get_computation (data
->current_loop
, use
, cand
);
6605 gcc_assert (comp
!= NULL_TREE
);
6607 ok
= extract_cond_operands (data
, use
->stmt
, &var_p
, NULL
, NULL
, NULL
);
6610 *var_p
= force_gimple_operand_gsi (&bsi
, comp
, true, SSA_NAME_VAR (*var_p
),
6611 true, GSI_SAME_STMT
);
6614 /* Rewrites USE using candidate CAND. */
/* NOTE(review): garbled extraction -- the `switch' on the use's type, two
   of the three `case' labels and the `break'/default skeleton are on
   missing lines.  What remains shows dispatch to the three per-kind
   rewriters, then an update of the rewritten statement's operand caches.  */
6617 rewrite_use (struct ivopts_data
*data
, struct iv_use
*use
, struct iv_cand
*cand
)
6621 case USE_NONLINEAR_EXPR
:
6622 rewrite_use_nonlinear_expr (data
, use
, cand
);
6626 rewrite_use_address (data
, use
, cand
);
6630 rewrite_use_compare (data
, use
, cand
);
/* Statement operands changed above; refresh them.  */
6637 update_stmt (use
->stmt
);
6640 /* Rewrite the uses using the selected induction variables. */
/* NOTE(review): garbled extraction -- return type, braces and the
   declarations of `use' and `i' are on missing lines.  Loops over all
   recorded IV uses and rewrites each one with the candidate chosen for
   it by the cost model (use->selected).  */
6643 rewrite_uses (struct ivopts_data
*data
)
6646 struct iv_cand
*cand
;
6649 for (i
= 0; i
< n_iv_uses (data
); i
++)
6651 use
= iv_use (data
, i
);
6652 cand
= use
->selected
;
6655 rewrite_use (data
, use
, cand
);
6659 /* Removes the ivs that are not used after rewriting. */
/* NOTE(review): garbled extraction -- braces, several declarations
   (`j', `bi', `stmt', `count', `def_temp') and a number of conditions and
   `continue'/`else' lines are missing.  Visible structure: collect dead
   IV SSA names into TOREMOVE, try to preserve their values in debug
   statements, then release all collected defs at once.  */
6662 remove_unused_ivs (struct ivopts_data
*data
)
6666 bitmap toremove
= BITMAP_ALLOC (NULL
);
6668 /* Figure out an order in which to release SSA DEFs so that we don't
6669 release something that we'd have to propagate into a debug stmt
/* Walk every SSA version the pass considered relevant.  */
6671 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, j
, bi
)
6673 struct version_info
*info
;
6675 info
= ver_info (data
, j
);
/* A biv with a nonzero step that no use selected and that need not be
   preserved is dead -- mark it for removal.  */
6677 && !integer_zerop (info
->iv
->step
)
6679 && !info
->iv
->have_use_for
6680 && !info
->preserve_biv
)
6682 bitmap_set_bit (toremove
, SSA_NAME_VERSION (info
->iv
->ssa_name
));
6684 tree def
= info
->iv
->ssa_name
;
/* If debug info is wanted, try to rewrite debug uses of the dead IV in
   terms of a surviving candidate instead of just dropping them.  */
6686 if (MAY_HAVE_DEBUG_STMTS
&& SSA_NAME_DEF_STMT (def
))
6688 imm_use_iterator imm_iter
;
6689 use_operand_p use_p
;
/* First scan: classify how DEF appears in debug binds.  */
6693 FOR_EACH_IMM_USE_STMT (stmt
, imm_iter
, def
)
6695 if (!gimple_debug_bind_p (stmt
))
6698 /* We just want to determine whether to do nothing
6699 (count == 0), to substitute the computed
6700 expression into a single use of the SSA DEF by
6701 itself (count == 1), or to use a debug temp
6702 because the SSA DEF is used multiple times or as
6703 part of a larger expression (count > 1). */
6705 if (gimple_debug_bind_get_value (stmt
) != def
)
6709 BREAK_FROM_IMM_USE_STMT (imm_iter
);
/* Pick the best surviving candidate to express DEF with, scoring each
   selected candidate by step equality, matching machine mode of the
   base, and constant base (capped at the first 64 uses).  */
6715 struct iv_use dummy_use
;
6716 struct iv_cand
*best_cand
= NULL
, *cand
;
6717 unsigned i
, best_pref
= 0, cand_pref
;
6719 memset (&dummy_use
, 0, sizeof (dummy_use
));
6720 dummy_use
.iv
= info
->iv
;
6721 for (i
= 0; i
< n_iv_uses (data
) && i
< 64; i
++)
6723 cand
= iv_use (data
, i
)->selected
;
6724 if (cand
== best_cand
)
6726 cand_pref
= operand_equal_p (cand
->iv
->step
,
6730 += TYPE_MODE (TREE_TYPE (cand
->iv
->base
))
6731 == TYPE_MODE (TREE_TYPE (info
->iv
->base
))
6734 += TREE_CODE (cand
->iv
->base
) == INTEGER_CST
6736 if (best_cand
== NULL
|| best_pref
< cand_pref
)
6739 best_pref
= cand_pref
;
/* Recompute DEF's value from the best candidate at its definition.  */
6746 tree comp
= get_computation_at (data
->current_loop
,
6747 &dummy_use
, best_cand
,
6748 SSA_NAME_DEF_STMT (def
));
/* Multiple/embedded debug uses: bind the computed value to a fresh
   DEBUG_EXPR_DECL and substitute that instead of the expression.  */
6754 tree vexpr
= make_node (DEBUG_EXPR_DECL
);
6755 DECL_ARTIFICIAL (vexpr
) = 1;
6756 TREE_TYPE (vexpr
) = TREE_TYPE (comp
);
6757 if (SSA_NAME_VAR (def
))
6758 DECL_MODE (vexpr
) = DECL_MODE (SSA_NAME_VAR (def
));
6760 DECL_MODE (vexpr
) = TYPE_MODE (TREE_TYPE (vexpr
));
6762 = gimple_build_debug_bind (vexpr
, comp
, NULL
);
6763 gimple_stmt_iterator gsi
;
/* Debug binds cannot go among PHIs; insert after labels in that case,
   otherwise right before the defining statement.  */
6765 if (gimple_code (SSA_NAME_DEF_STMT (def
)) == GIMPLE_PHI
)
6766 gsi
= gsi_after_labels (gimple_bb
6767 (SSA_NAME_DEF_STMT (def
)));
6769 gsi
= gsi_for_stmt (SSA_NAME_DEF_STMT (def
));
6771 gsi_insert_before (&gsi
, def_temp
, GSI_SAME_STMT
);
/* Second scan: replace every debug use of DEF with the replacement
   expression (or the debug temp).  */
6775 FOR_EACH_IMM_USE_STMT (stmt
, imm_iter
, def
)
6777 if (!gimple_debug_bind_p (stmt
))
6780 FOR_EACH_IMM_USE_ON_STMT (use_p
, imm_iter
)
6781 SET_USE (use_p
, comp
);
/* Release all dead defs in a dependency-safe order.  */
6789 release_defs_bitset (toremove
);
6791 BITMAP_FREE (toremove
);
6794 /* Frees memory occupied by struct tree_niter_desc in *VALUE. Callback
6795 for hash_map::traverse. */
/* NOTE(review): garbled extraction -- the return type and the body
   (presumably freeing *VALUE and returning true to continue traversal)
   are on missing lines; only the signature survives.  */
6798 free_tree_niter_desc (edge
const &, tree_niter_desc
*const &value
, void *)
6804 /* Frees data allocated by the optimization of a single loop. */
/* NOTE(review): garbled extraction -- braces, loop-variable declarations
   (`i', `j', `bi', `obj') and some guard conditions are on missing lines.
   Resets the per-loop state in DATA so the next loop starts clean.  */
6807 free_loop_data (struct ivopts_data
*data
)
/* Drop the cached number-of-iterations descriptors.  */
6815 data
->niters
->traverse
<void *, free_tree_niter_desc
> (NULL
);
6816 delete data
->niters
;
6817 data
->niters
= NULL
;
/* Reset the per-SSA-version bookkeeping for every relevant name.  */
6820 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
6822 struct version_info
*info
;
6824 info
= ver_info (data
, i
);
6827 info
->has_nonlin_use
= false;
6828 info
->preserve_biv
= false;
6831 bitmap_clear (data
->relevant
);
6832 bitmap_clear (data
->important_candidates
);
/* Free per-use data: the related-candidates bitmap and the cost map with
   its per-entry dependence bitmaps.  */
6834 for (i
= 0; i
< n_iv_uses (data
); i
++)
6836 struct iv_use
*use
= iv_use (data
, i
);
6839 BITMAP_FREE (use
->related_cands
);
6840 for (j
= 0; j
< use
->n_map_members
; j
++)
6841 if (use
->cost_map
[j
].depends_on
)
6842 BITMAP_FREE (use
->cost_map
[j
].depends_on
);
6843 free (use
->cost_map
);
6846 data
->iv_uses
.truncate (0);
/* Free per-candidate data.  */
6848 for (i
= 0; i
< n_iv_cands (data
); i
++)
6850 struct iv_cand
*cand
= iv_cand (data
, i
);
6853 if (cand
->depends_on
)
6854 BITMAP_FREE (cand
->depends_on
);
6857 data
->iv_candidates
.truncate (0);
/* Grow the version_info array if new SSA names appeared; doubling keeps
   reallocation amortized.  */
6859 if (data
->version_info_size
< num_ssa_names
)
6861 data
->version_info_size
= 2 * num_ssa_names
;
6862 free (data
->version_info
);
6863 data
->version_info
= XCNEWVEC (struct version_info
, data
->version_info_size
);
6866 data
->max_inv_id
= 0;
/* Clear the DECL_RTLs we set temporarily during cost estimation.  */
6868 FOR_EACH_VEC_ELT (decl_rtl_to_reset
, i
, obj
)
6869 SET_DECL_RTL (obj
, NULL_RTX
);
6871 decl_rtl_to_reset
.truncate (0);
6873 data
->inv_expr_tab
->empty ();
6874 data
->inv_expr_id
= 0;
6877 /* Finalizes data structures used by the iv optimization pass. LOOPS is the
/* NOTE(review): garbled extraction -- the rest of the comment above, the
   return type and braces are on missing lines.  Releases everything the
   pass allocated: per-loop data first, then the pass-lifetime structures
   (version info, bitmaps, vectors, hash tables, expansion cache).  */
6881 tree_ssa_iv_optimize_finalize (struct ivopts_data
*data
)
6883 free_loop_data (data
);
6884 free (data
->version_info
);
6885 BITMAP_FREE (data
->relevant
);
6886 BITMAP_FREE (data
->important_candidates
);
6888 decl_rtl_to_reset
.release ();
6889 data
->iv_uses
.release ();
6890 data
->iv_candidates
.release ();
6891 delete data
->inv_expr_tab
;
6892 data
->inv_expr_tab
= NULL
;
6893 free_affine_expand_cache (&data
->name_expansion_cache
);
6896 /* Returns true if the loop body BODY includes any function calls. */
/* NOTE(review): garbled extraction -- the return type, braces, declaration
   of `i' and the `return true;'/`return false;' statements are on missing
   lines.  Scans every statement of the NUM_NODES blocks for a call that is
   not an inexpensive builtin.  */
6899 loop_body_includes_call (basic_block
*body
, unsigned num_nodes
)
6901 gimple_stmt_iterator gsi
;
6904 for (i
= 0; i
< num_nodes
; i
++)
6905 for (gsi
= gsi_start_bb (body
[i
]); !gsi_end_p (gsi
); gsi_next (&gsi
))
6907 gimple stmt
= gsi_stmt (gsi
);
6908 if (is_gimple_call (stmt
)
6909 && !is_inexpensive_builtin (gimple_call_fndecl (stmt
)))
6915 /* Optimizes the LOOP. Returns true if anything changed. */
/* NOTE(review): garbled extraction -- braces, the `body' declaration,
   `goto finish'-style early exits, the `finish:' label and the final
   `return changed;' are on missing lines.  The visible statements follow
   the pass pipeline described in the file header: find IVs, find uses,
   find candidates, compute costs, pick the optimal set, rewrite, clean
   up.  */
6918 tree_ssa_iv_optimize_loop (struct ivopts_data
*data
, struct loop
*loop
)
6920 bool changed
= false;
6921 struct iv_ca
*iv_ca
;
6922 edge exit
= single_dom_exit (loop
);
/* Per-loop setup; niters cache must be empty from the previous loop.  */
6925 gcc_assert (!data
->niters
);
6926 data
->current_loop
= loop
;
6927 data
->speed
= optimize_loop_for_speed_p (loop
);
6929 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6931 fprintf (dump_file
, "Processing loop %d\n", loop
->num
);
6935 fprintf (dump_file
, " single exit %d -> %d, exit condition ",
6936 exit
->src
->index
, exit
->dest
->index
);
6937 print_gimple_stmt (dump_file
, last_stmt (exit
->src
), 0, TDF_SLIM
);
6938 fprintf (dump_file
, "\n");
6941 fprintf (dump_file
, "\n");
6944 body
= get_loop_body (loop
);
6945 data
->body_includes_call
= loop_body_includes_call (body
, loop
->num_nodes
);
6946 renumber_gimple_stmt_uids_in_blocks (body
, loop
->num_nodes
);
6949 data
->loop_single_exit_p
= exit
!= NULL
&& loop_only_exit_p (loop
, exit
);
6951 /* For each ssa name determines whether it behaves as an induction variable
6953 if (!find_induction_variables (data
))
6956 /* Finds interesting uses (item 1). */
6957 find_interesting_uses (data
);
/* Bail out on pathologically many uses to keep compile time bounded.  */
6958 if (n_iv_uses (data
) > MAX_CONSIDERED_USES
)
6961 /* Finds candidates for the induction variables (item 2). */
6962 find_iv_candidates (data
);
6964 /* Calculates the costs (item 3, part 1). */
6965 determine_iv_costs (data
);
6966 determine_use_iv_costs (data
);
6967 determine_set_costs (data
);
6969 /* Find the optimal set of induction variables (item 3, part 2). */
6970 iv_ca
= find_optimal_iv_set (data
);
6975 /* Create the new induction variables (item 4, part 1). */
6976 create_new_ivs (data
, iv_ca
);
6977 iv_ca_free (&iv_ca
);
6979 /* Rewrite the uses (item 4, part 2). */
6980 rewrite_uses (data
);
6982 /* Remove the ivs that are unused after rewriting. */
6983 remove_unused_ivs (data
);
6985 /* We have changed the structure of induction variables; it might happen
6986 that definitions in the scev database refer to some of them that were
6991 free_loop_data (data
);
6996 /* Main entry point. Optimizes induction variables in loops. */
6999 tree_ssa_iv_optimize (void)
7002 struct ivopts_data data
;
7004 tree_ssa_iv_optimize_init (&data
);
7006 /* Optimize the loops starting with the innermost ones. */
7007 FOR_EACH_LOOP (loop
, LI_FROM_INNERMOST
)
7009 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7010 flow_loop_dump (loop
, dump_file
, NULL
, 1);
7012 tree_ssa_iv_optimize_loop (&data
, loop
);
7015 tree_ssa_iv_optimize_finalize (&data
);