/* Induction variable optimizations.
   Copyright (C) 2003-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass tries to find the optimal set of induction variables for the loop.
   It optimizes just the basic linear induction variables (although adding
   support for other types should not be too hard).  It includes the
   optimizations commonly known as strength reduction, induction variable
   coalescing and induction variable elimination.  It does it in the
   following steps:

   1) The interesting uses of induction variables are found.  This includes

      -- uses of induction variables in non-linear expressions
      -- addresses of arrays
      -- comparisons of induction variables

      Note the interesting uses are categorized and handled in groups.
      Generally, address type uses are grouped together if their iv bases
      differ only by a constant offset.
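
      For example (an illustrative sketch, not taken from any real testcase):
      in a loop such as

	for (i = 0; i < n; i++)
	  {
	    a[i] = 1;
	    a[i + 2] = 2;
	  }

      the two stores address the same array with iv bases &a[0] and &a[2],
      which differ only by a constant offset, so both address uses end up in
      one group and can later be served by a single selected candidate.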

   2) Candidates for the induction variables are found.  This includes

      -- old induction variables
      -- the variables defined by expressions derived from the "interesting
	 groups/uses" above

   3) The optimal (w.r. to a cost function) set of variables is chosen.  The
      cost function assigns a cost to sets of induction variables and consists
      of three parts:

      -- The group/use costs.  Each of the interesting groups/uses chooses
	 the best induction variable in the set and adds its cost to the sum.
	 The cost reflects the time spent on modifying the induction variable's
	 value to be usable for the given purpose (adding base and offset for
	 arrays, etc.).
      -- The variable costs.  Each of the variables has a cost assigned that
	 reflects the costs associated with incrementing the value of the
	 variable.  The original variables are somewhat preferred.
      -- The set cost.  Depending on the size of the set, extra cost may be
	 added to reflect register pressure.

      All the costs are defined in a machine-specific way, using the target
      hooks and machine descriptions to determine them.

   4) The trees are transformed to use the new variables, the dead code is
      removed.

   All of this is done loop by loop.  Doing it globally is theoretically
   possible, it might give better performance and it might enable us
   to decide costs more precisely, but getting all the interactions right
   would be complicated.
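
   As an overall illustration (a rough sketch, not lifted from any particular
   testcase), on a typical target the pass may rewrite

	for (i = 0; i < n; i++)
	  sum += a[i];

   so that the per-iteration address computation a + i * sizeof (*a) is
   strength-reduced to a pointer that is simply incremented, roughly

	for (p = a; p != a + n; p++)
	  sum += *p;

   where the exit test now compares the pointer, so the original counter i
   can be eliminated.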

   For targets supporting low-overhead loops, IVOPTs has to take care of
   the loops which will probably be transformed by the RTL doloop
   optimization, to try to make the selected IV candidate set optimal.  The
   process of doloop support includes:

   1) Analyze whether the current loop will be transformed to a doloop or not,
      find and mark its compare type IV use as doloop use (iv_group field
      doloop_p), and set flag doloop_use_p of ivopts_data to notify subsequent
      processing on doloop.  See analyze_and_mark_doloop_use and its callees
      for the details.  The target hook predict_doloop_p can be used for
      target specific checks.

   2) Add one doloop dedicated IV cand {(may_be_zero ? 1 : (niter + 1)), +, -1},
      set flag doloop_p of iv_cand, step cost is set as zero and no extra cost
      like biv.  For cost determination between doloop IV cand and IV use, the
      target hooks doloop_cost_for_generic and doloop_cost_for_address are
      provided to add on extra costs for generic type and address type IV use.
      Zero cost is assigned to the pair between doloop IV cand and doloop IV
      use, and bound zero is set for IV elimination.

   3) With the cost setting in step 2), the current cost model based IV
      selection algorithm will process as usual, and will pick up the doloop
      dedicated IV if it is profitable.  */
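
/* For illustration only (a sketch; the exact setup is target dependent):
   if niter analysis shows may_be_zero is false and niter + 1 is 4, the
   dedicated doloop candidate is the IV {4, +, -1}, i.e. a counter starting
   at 4 and decremented by 1 each iteration, which a doloop-capable target
   can turn into a single decrement-and-branch instruction; this is why its
   step cost and its cost against the doloop use are zero.  */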

#include "coretypes.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-address.h"
#include "builtins.h"
#include "tree-vectorizer.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* FIXME: Expressions are expanded to RTL in this pass to determine the
   cost of different addressing modes.  This should be moved to a TBD
   interface between the GIMPLE and RTL worlds.  */

/* The infinite cost.  */
#define INFTY 1000000000

/* Returns the expected number of loop iterations for LOOP.
   The average trip count is computed from profile data if it
   exists.  */

static inline HOST_WIDE_INT
avg_loop_niter (class loop *loop)
{
  HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);

  if (niter == -1)
    {
      niter = likely_max_stmt_executions_int (loop);

      if (niter == -1 || niter > param_avg_loop_niter)
	return param_avg_loop_niter;
    }

  return niter;
}

/* Representation of the induction variable.  */
struct iv
{
  tree base;		/* Initial value of the iv.  */
  tree base_object;	/* A memory object to which the induction variable points.  */
  tree step;		/* Step of the iv (constant only).  */
  tree ssa_name;	/* The ssa name with the value.  */
  struct iv_use *nonlin_use;	/* The identifier in the use if it is the case.  */
  bool biv_p;		/* Is it a biv?  */
  bool no_overflow;	/* True if the iv doesn't overflow.  */
  bool have_address_use;/* For biv, indicate if it's used in any address
			   type use.  */
};

/* Per-ssa version information (induction variable descriptions, etc.).  */
struct version_info
{
  tree name;		/* The ssa name.  */
  struct iv *iv;	/* Induction variable description.  */
  bool has_nonlin_use;	/* For a loop-level invariant, whether it is used in
			   an expression that is not an induction variable.  */
  bool preserve_biv;	/* For the original biv, whether to preserve it.  */
  unsigned inv_id;	/* Id of an invariant.  */
};

/* Types of uses.  */
enum use_type
{
  USE_NONLINEAR_EXPR,	/* Use in a nonlinear expression.  */
  USE_REF_ADDRESS,	/* Use is an address for an explicit memory
			   reference.  */
  USE_PTR_ADDRESS,	/* Use is a pointer argument to a function in
			   cases where the expansion of the function
			   will turn the argument into a normal address.  */
  USE_COMPARE		/* Use is a compare.  */
};

/* Cost of a computation.  */
class comp_cost
{
public:
  comp_cost (): cost (0), complexity (0), scratch (0)
  {}
  comp_cost (int64_t cost, unsigned complexity, int64_t scratch = 0)
    : cost (cost), complexity (complexity), scratch (scratch)
  {}

  /* Returns true if COST is infinite.  */
  bool infinite_cost_p ();

  /* Adds costs COST1 and COST2.  */
  friend comp_cost operator+ (comp_cost cost1, comp_cost cost2);

  /* Adds COST to the comp_cost.  */
  comp_cost operator+= (comp_cost cost);

  /* Adds constant C to this comp_cost.  */
  comp_cost operator+= (HOST_WIDE_INT c);

  /* Subtracts constant C from this comp_cost.  */
  comp_cost operator-= (HOST_WIDE_INT c);

  /* Divides the comp_cost by constant C.  */
  comp_cost operator/= (HOST_WIDE_INT c);

  /* Multiplies the comp_cost by constant C.  */
  comp_cost operator*= (HOST_WIDE_INT c);

  /* Subtracts costs COST1 and COST2.  */
  friend comp_cost operator- (comp_cost cost1, comp_cost cost2);

  /* Subtracts COST from this comp_cost.  */
  comp_cost operator-= (comp_cost cost);

  /* Returns true if COST1 is smaller than COST2.  */
  friend bool operator< (comp_cost cost1, comp_cost cost2);

  /* Returns true if COST1 and COST2 are equal.  */
  friend bool operator== (comp_cost cost1, comp_cost cost2);

  /* Returns true if COST1 is smaller than or equal to COST2.  */
  friend bool operator<= (comp_cost cost1, comp_cost cost2);

  int64_t cost;		/* The runtime cost.  */
  unsigned complexity;	/* The estimate of the complexity of the code for
			   the computation (in no concrete units --
			   complexity field should be larger for more
			   complex expressions and addressing modes).  */
  int64_t scratch;	/* Scratch used during cost computation.  */
};

static const comp_cost no_cost;
static const comp_cost infinite_cost (INFTY, 0, INFTY);
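
/* A small worked example of the cost ordering defined below (values made up
   for illustration): the runtime cost is compared first and the complexity
   field only breaks ties, so

     comp_cost (4, 2) < comp_cost (5, 0)   -- lower cost wins
     comp_cost (4, 1) < comp_cost (4, 2)   -- equal cost, lower complexity wins

   and every finite cost compares below infinite_cost.  */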

bool
comp_cost::infinite_cost_p ()
{
  return cost == INFTY;
}

comp_cost
operator+ (comp_cost cost1, comp_cost cost2)
{
  if (cost1.infinite_cost_p () || cost2.infinite_cost_p ())
    return infinite_cost;

  gcc_assert (cost1.cost + cost2.cost < infinite_cost.cost);
  cost1.cost += cost2.cost;
  cost1.complexity += cost2.complexity;

  return cost1;
}

comp_cost
operator- (comp_cost cost1, comp_cost cost2)
{
  if (cost1.infinite_cost_p ())
    return infinite_cost;

  gcc_assert (!cost2.infinite_cost_p ());
  gcc_assert (cost1.cost - cost2.cost < infinite_cost.cost);

  cost1.cost -= cost2.cost;
  cost1.complexity -= cost2.complexity;

  return cost1;
}

comp_cost
comp_cost::operator+= (comp_cost cost)
{
  *this = *this + cost;
  return *this;
}

comp_cost
comp_cost::operator+= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  gcc_assert (this->cost + c < infinite_cost.cost);
  this->cost += c;

  return *this;
}

comp_cost
comp_cost::operator-= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  gcc_assert (this->cost - c < infinite_cost.cost);
  this->cost -= c;

  return *this;
}

comp_cost
comp_cost::operator/= (HOST_WIDE_INT c)
{
  gcc_assert (c != 0);
  if (infinite_cost_p ())
    return *this;

  this->cost /= c;

  return *this;
}

comp_cost
comp_cost::operator*= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  gcc_assert (this->cost * c < infinite_cost.cost);
  this->cost *= c;

  return *this;
}

comp_cost
comp_cost::operator-= (comp_cost cost)
{
  *this = *this - cost;
  return *this;
}

bool
operator< (comp_cost cost1, comp_cost cost2)
{
  if (cost1.cost == cost2.cost)
    return cost1.complexity < cost2.complexity;

  return cost1.cost < cost2.cost;
}

bool
operator== (comp_cost cost1, comp_cost cost2)
{
  return cost1.cost == cost2.cost
	 && cost1.complexity == cost2.complexity;
}

bool
operator<= (comp_cost cost1, comp_cost cost2)
{
  return cost1 < cost2 || cost1 == cost2;
}

struct iv_inv_expr_ent;

/* The candidate - cost pair.  */
class cost_pair
{
public:
  struct iv_cand *cand;	/* The candidate.  */
  comp_cost cost;	/* The cost.  */
  enum tree_code comp;	/* For iv elimination, the comparison.  */
  bitmap inv_vars;	/* The list of invariant ssa_vars that have to be
			   preserved when representing iv_use with iv_cand.  */
  bitmap inv_exprs;	/* The list of newly created invariant expressions
			   when representing iv_use with iv_cand.  */
  tree value;		/* For final value elimination, the expression for
			   the final value of the iv.  For iv elimination,
			   the new bound to compare with.  */
};

/* Use of an induction variable.  */
struct iv_use
{
  unsigned id;		/* The id of the use.  */
  unsigned group_id;	/* The group id the use belongs to.  */
  enum use_type type;	/* Type of the use.  */
  tree mem_type;	/* The memory type to use when testing whether an
			   address is legitimate, and what the address's
			   cost is.  */
  struct iv *iv;	/* The induction variable it is based on.  */
  gimple *stmt;		/* Statement in which it occurs.  */
  tree *op_p;		/* The place where it occurs.  */

  tree addr_base;	/* Base address with const offset stripped.  */
  poly_uint64_pod addr_offset;
			/* Const offset stripped from base address.  */
};

/* Group of uses.  */
struct iv_group
{
  /* The id of the group.  */
  unsigned id;
  /* Uses of the group are of the same type.  */
  enum use_type type;
  /* The set of "related" IV candidates, plus the important ones.  */
  bitmap related_cands;
  /* Number of IV candidates in the cost_map.  */
  unsigned n_map_members;
  /* The costs wrto the iv candidates.  */
  class cost_pair *cost_map;
  /* The selected candidate for the group.  */
  struct iv_cand *selected;
  /* To indicate this is a doloop use group.  */
  bool doloop_p;
  /* Uses in the group.  */
  vec<struct iv_use *> vuses;
};

/* The position where the iv is computed.  */
enum iv_position
{
  IP_NORMAL,		/* At the end, just before the exit condition.  */
  IP_END,		/* At the end of the latch block.  */
  IP_BEFORE_USE,	/* Immediately before a specific use.  */
  IP_AFTER_USE,		/* Immediately after a specific use.  */
  IP_ORIGINAL		/* The original biv.  */
};

/* The induction variable candidate.  */
struct iv_cand
{
  unsigned id;		/* The number of the candidate.  */
  bool important;	/* Whether this is an "important" candidate, i.e. such
			   that it should be considered by all uses.  */
  ENUM_BITFIELD(iv_position) pos : 8;	/* Where it is computed.  */
  gimple *incremented_at;/* For original biv, the statement where it is
			   incremented.  */
  tree var_before;	/* The variable used for it before increment.  */
  tree var_after;	/* The variable used for it after increment.  */
  struct iv *iv;	/* The value of the candidate.  NULL for
			   "pseudocandidate" used to indicate the possibility
			   to replace the final value of an iv by direct
			   computation of the value.  */
  unsigned cost;	/* Cost of the candidate.  */
  unsigned cost_step;	/* Cost of the candidate's increment operation.  */
  struct iv_use *ainc_use;/* For IP_{BEFORE,AFTER}_USE candidates, the place
			     where it is incremented.  */
  bitmap inv_vars;	/* The list of invariant ssa_vars used in step of the
			   biv.  */
  bitmap inv_exprs;	/* If step is more complicated than a single ssa_var,
			   handle it as a new invariant expression which will
			   be hoisted out of loop.  */
  struct iv *orig_iv;	/* The original iv if this cand is added from biv with
			   smaller type.  */
  bool doloop_p;	/* Whether this is a doloop candidate.  */
};

/* Hashtable entry for common candidate derived from iv uses.  */
class iv_common_cand
{
public:
  tree base;
  tree step;
  /* IV uses from which this common candidate is derived.  */
  auto_vec<struct iv_use *> uses;
  hashval_t hash;
};

/* Hashtable helpers.  */

struct iv_common_cand_hasher : delete_ptr_hash <iv_common_cand>
{
  static inline hashval_t hash (const iv_common_cand *);
  static inline bool equal (const iv_common_cand *, const iv_common_cand *);
};

/* Hash function for possible common candidates.  */

inline hashval_t
iv_common_cand_hasher::hash (const iv_common_cand *ccand)
{
  return ccand->hash;
}

/* Hash table equality function for common candidates.  */

inline bool
iv_common_cand_hasher::equal (const iv_common_cand *ccand1,
			      const iv_common_cand *ccand2)
{
  return (ccand1->hash == ccand2->hash
	  && operand_equal_p (ccand1->base, ccand2->base, 0)
	  && operand_equal_p (ccand1->step, ccand2->step, 0)
	  && (TYPE_PRECISION (TREE_TYPE (ccand1->base))
	      == TYPE_PRECISION (TREE_TYPE (ccand2->base))));
}

/* Loop invariant expression hashtable entry.  */

struct iv_inv_expr_ent
{
  /* Tree expression of the entry.  */
  tree expr;
  /* Unique identifier.  */
  int id;
  /* Hash value.  */
  hashval_t hash;
};

/* Sort iv_inv_expr_ent pair A and B by id field.  */

static int
sort_iv_inv_expr_ent (const void *a, const void *b)
{
  const iv_inv_expr_ent * const *e1 = (const iv_inv_expr_ent * const *) (a);
  const iv_inv_expr_ent * const *e2 = (const iv_inv_expr_ent * const *) (b);

  unsigned id1 = (*e1)->id;
  unsigned id2 = (*e2)->id;

  if (id1 < id2)
    return -1;
  else if (id1 > id2)
    return 1;
  else
    return 0;
}

/* Hashtable helpers.  */

struct iv_inv_expr_hasher : free_ptr_hash <iv_inv_expr_ent>
{
  static inline hashval_t hash (const iv_inv_expr_ent *);
  static inline bool equal (const iv_inv_expr_ent *, const iv_inv_expr_ent *);
};

/* Return true if uses of type TYPE represent some form of address.  */

static inline bool
address_p (use_type type)
{
  return type == USE_REF_ADDRESS || type == USE_PTR_ADDRESS;
}

/* Hash function for loop invariant expressions.  */

inline hashval_t
iv_inv_expr_hasher::hash (const iv_inv_expr_ent *expr)
{
  return expr->hash;
}

/* Hash table equality function for expressions.  */

inline bool
iv_inv_expr_hasher::equal (const iv_inv_expr_ent *expr1,
			   const iv_inv_expr_ent *expr2)
{
  return expr1->hash == expr2->hash
	 && operand_equal_p (expr1->expr, expr2->expr, 0);
}

class ivopts_data
{
public:
  /* The currently optimized loop.  */
  class loop *current_loop;

  /* Numbers of iterations for all exits of the current loop.  */
  hash_map<edge, tree_niter_desc *> *niters;

  /* Number of registers used in it.  */
  unsigned regs_used;

  /* The size of version_info array allocated.  */
  unsigned version_info_size;

  /* The array of information for the ssa names.  */
  struct version_info *version_info;

  /* The hashtable of loop invariant expressions created
     by ivopt.  */
  hash_table<iv_inv_expr_hasher> *inv_expr_tab;

  /* The bitmap of indices in version_info whose value was changed.  */
  bitmap relevant;

  /* The uses of induction variables.  */
  vec<iv_group *> vgroups;

  /* The candidates.  */
  vec<iv_cand *> vcands;

  /* A bitmap of important candidates.  */
  bitmap important_candidates;

  /* Cache used by tree_to_aff_combination_expand.  */
  hash_map<tree, name_expansion *> *name_expansion_cache;

  /* The hashtable of common candidates derived from iv uses.  */
  hash_table<iv_common_cand_hasher> *iv_common_cand_tab;

  /* The common candidates.  */
  vec<iv_common_cand *> iv_common_cands;

  /* Hash map recording base object information of tree exp.  */
  hash_map<tree, tree> *base_object_map;

  /* The maximum invariant variable id.  */
  unsigned max_inv_var_id;

  /* The maximum invariant expression id.  */
  unsigned max_inv_expr_id;

  /* Number of no_overflow BIVs which are not used in memory address.  */
  unsigned bivs_not_used_in_addr;

  /* Obstack for iv structure.  */
  struct obstack iv_obstack;

  /* Whether to consider just related and important candidates when replacing a
     use.  */
  bool consider_all_candidates;

  /* Are we optimizing for speed?  */
  bool speed;

  /* Whether the loop body includes any function calls.  */
  bool body_includes_call;

  /* Whether the loop body can only be exited via single exit.  */
  bool loop_single_exit_p;

  /* Whether the loop has doloop comparison use.  */
  bool doloop_use_p;
};

/* An assignment of iv candidates to uses.  */

class iv_ca
{
public:
  /* The number of uses covered by the assignment.  */
  unsigned upto;

  /* Number of uses that cannot be expressed by the candidates in the set.  */
  unsigned bad_groups;

  /* Candidate assigned to a use, together with the related costs.  */
  class cost_pair **cand_for_group;

  /* Number of times each candidate is used.  */
  unsigned *n_cand_uses;

  /* The candidates used.  */
  bitmap cands;

  /* The number of candidates in the set.  */
  unsigned n_cands;

  /* The number of invariants needed, including both invariant variants and
     invariant expressions.  */
  unsigned n_invs;

  /* Total cost of expressing uses.  */
  comp_cost cand_use_cost;

  /* Total cost of candidates.  */
  int64_t cand_cost;

  /* Number of times each invariant variable is used.  */
  unsigned *n_inv_var_uses;

  /* Number of times each invariant expression is used.  */
  unsigned *n_inv_expr_uses;

  /* Total cost of the assignment.  */
  comp_cost cost;
};

/* Difference of two iv candidate assignments.  */

struct iv_ca_delta
{
  /* Changed group.  */
  struct iv_group *group;

  /* An old assignment (for rollback purposes).  */
  class cost_pair *old_cp;

  /* A new assignment.  */
  class cost_pair *new_cp;

  /* Next change in the list.  */
  struct iv_ca_delta *next;
};

/* Bound on number of candidates below which all candidates are considered.  */

#define CONSIDER_ALL_CANDIDATES_BOUND \
  ((unsigned) param_iv_consider_all_candidates_bound)

/* If there are more iv occurrences, we just give up (it is quite unlikely that
   optimizing such a loop would help, and it would take ages).  */

#define MAX_CONSIDERED_GROUPS \
  ((unsigned) param_iv_max_considered_uses)

/* If there are at most this number of ivs in the set, try removing unnecessary
   ivs from the set always.  */

#define ALWAYS_PRUNE_CAND_SET_BOUND \
  ((unsigned) param_iv_always_prune_cand_set_bound)

/* The list of trees for which the decl_rtl field must be reset is stored
   here.  */

static vec<tree> decl_rtl_to_reset;

static comp_cost force_expr_to_var_cost (tree, bool);

/* The single loop exit if it dominates the latch, NULL otherwise.  */

edge
single_dom_exit (class loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return NULL;

  if (!just_once_each_iteration_p (loop, exit->src))
    return NULL;

  return exit;
}

/* Dumps information about the induction variable IV to FILE.  Don't dump
   variable's name if DUMP_NAME is FALSE.  The information is dumped with
   preceding spaces indicated by INDENT_LEVEL.  */

void
dump_iv (FILE *file, struct iv *iv, bool dump_name, unsigned indent_level)
{
  const char *p;
  const char spaces[9] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\0'};

  if (indent_level > 4)
    indent_level = 4;
  p = spaces + 8 - (indent_level << 1);

  fprintf (file, "%sIV struct:\n", p);
  if (iv->ssa_name && dump_name)
    {
      fprintf (file, "%s  SSA_NAME:\t", p);
      print_generic_expr (file, iv->ssa_name, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "%s  Type:\t", p);
  print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
  fprintf (file, "\n");

  fprintf (file, "%s  Base:\t", p);
  print_generic_expr (file, iv->base, TDF_SLIM);
  fprintf (file, "\n");

  fprintf (file, "%s  Step:\t", p);
  print_generic_expr (file, iv->step, TDF_SLIM);
  fprintf (file, "\n");

  if (iv->base_object)
    {
      fprintf (file, "%s  Object:\t", p);
      print_generic_expr (file, iv->base_object, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "%s  Biv:\t%c\n", p, iv->biv_p ? 'Y' : 'N');

  fprintf (file, "%s  Overflowness wrto loop niter:\t%s\n",
	   p, iv->no_overflow ? "No-overflow" : "Overflow");
}

/* Dumps information about the USE to FILE.  */

void
dump_use (FILE *file, struct iv_use *use)
{
  fprintf (file, "  Use %d.%d:\n", use->group_id, use->id);
  fprintf (file, "    At stmt:\t");
  print_gimple_stmt (file, use->stmt, 0);
  fprintf (file, "    At pos:\t");
  if (use->op_p)
    print_generic_expr (file, *use->op_p, TDF_SLIM);
  fprintf (file, "\n");
  dump_iv (file, use->iv, false, 2);
}

/* Dumps information about the uses to FILE.  */

void
dump_groups (FILE *file, struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_group *group;

  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];
      fprintf (file, "Group %d:\n", group->id);
      if (group->type == USE_NONLINEAR_EXPR)
	fprintf (file, "  Type:\tGENERIC\n");
      else if (group->type == USE_REF_ADDRESS)
	fprintf (file, "  Type:\tREFERENCE ADDRESS\n");
      else if (group->type == USE_PTR_ADDRESS)
	fprintf (file, "  Type:\tPOINTER ARGUMENT ADDRESS\n");
      else
	{
	  gcc_assert (group->type == USE_COMPARE);
	  fprintf (file, "  Type:\tCOMPARE\n");
	}

      for (j = 0; j < group->vuses.length (); j++)
	dump_use (file, group->vuses[j]);
    }
}

/* Dumps information about induction variable candidate CAND to FILE.  */

void
dump_cand (FILE *file, struct iv_cand *cand)
{
  struct iv *iv = cand->iv;

  fprintf (file, "Candidate %d:\n", cand->id);
  if (cand->inv_vars)
    {
      fprintf (file, "  Depend on inv.vars: ");
      dump_bitmap (file, cand->inv_vars);
    }
  if (cand->inv_exprs)
    {
      fprintf (file, "  Depend on inv.exprs: ");
      dump_bitmap (file, cand->inv_exprs);
    }

  if (cand->var_before)
    {
      fprintf (file, "  Var befor: ");
      print_generic_expr (file, cand->var_before, TDF_SLIM);
      fprintf (file, "\n");
    }
  if (cand->var_after)
    {
      fprintf (file, "  Var after: ");
      print_generic_expr (file, cand->var_after, TDF_SLIM);
      fprintf (file, "\n");
    }

  switch (cand->pos)
    {
    case IP_NORMAL:
      fprintf (file, "  Incr POS: before exit test\n");
      break;

    case IP_BEFORE_USE:
      fprintf (file, "  Incr POS: before use %d\n", cand->ainc_use->id);
      break;

    case IP_AFTER_USE:
      fprintf (file, "  Incr POS: after use %d\n", cand->ainc_use->id);
      break;

    case IP_END:
      fprintf (file, "  Incr POS: at end\n");
      break;

    case IP_ORIGINAL:
      fprintf (file, "  Incr POS: orig biv\n");
      break;
    }

  dump_iv (file, iv, false, 1);
}

/* Returns the info for ssa version VER.  */

static inline struct version_info *
ver_info (struct ivopts_data *data, unsigned ver)
{
  return data->version_info + ver;
}

/* Returns the info for ssa name NAME.  */

static inline struct version_info *
name_info (struct ivopts_data *data, tree name)
{
  return ver_info (data, SSA_NAME_VERSION (name));
}

/* Returns true if STMT is after the place where the IP_NORMAL ivs will be
   emitted in LOOP.  */

static bool
stmt_after_ip_normal_pos (class loop *loop, gimple *stmt)
{
  basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);

  gcc_assert (bb);

  if (sbb == loop->latch)
    return true;

  if (sbb != bb)
    return false;

  return stmt == last_stmt (bb);
}

/* Returns true if STMT is after the place where the original induction
   variable CAND is incremented.  If TRUE_IF_EQUAL is set, we return true
   if the positions are identical.  */

static bool
stmt_after_inc_pos (struct iv_cand *cand, gimple *stmt, bool true_if_equal)
{
  basic_block cand_bb = gimple_bb (cand->incremented_at);
  basic_block stmt_bb = gimple_bb (stmt);

  if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
    return false;

  if (stmt_bb != cand_bb)
    return true;

  if (true_if_equal
      && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
    return true;
  return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
}

/* Returns true if STMT is after the place where the induction variable
   CAND is incremented in LOOP.  */

static bool
stmt_after_increment (class loop *loop, struct iv_cand *cand, gimple *stmt)
{
  switch (cand->pos)
    {
    case IP_END:
      return false;

    case IP_NORMAL:
      return stmt_after_ip_normal_pos (loop, stmt);

    case IP_ORIGINAL:
    case IP_AFTER_USE:
      return stmt_after_inc_pos (cand, stmt, false);

    case IP_BEFORE_USE:
      return stmt_after_inc_pos (cand, stmt, true);

    default:
      gcc_unreachable ();
    }
}

/* walk_tree callback for contains_abnormal_ssa_name_p.  */

static tree
contains_abnormal_ssa_name_p_1 (tree *tp, int *walk_subtrees, void *)
{
  if (TREE_CODE (*tp) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (*tp))
    return *tp;

  if (!EXPR_P (*tp))
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Returns true if EXPR contains a ssa name that occurs in an
   abnormal phi node.  */

bool
contains_abnormal_ssa_name_p (tree expr)
{
  return walk_tree_without_duplicates
	   (&expr, contains_abnormal_ssa_name_p_1, NULL) != NULL_TREE;
}

/* Returns the structure describing number of iterations determined from
   EXIT of DATA->current_loop, or NULL if something goes wrong.  */

static class tree_niter_desc *
niter_for_exit (struct ivopts_data *data, edge exit)
{
  class tree_niter_desc *desc;
  tree_niter_desc **slot;

  if (!data->niters)
    {
      data->niters = new hash_map<edge, tree_niter_desc *>;
      slot = NULL;
    }
  else
    slot = data->niters->get (exit);

  if (!slot)
    {
      /* Try to determine number of iterations.  We cannot safely work with ssa
	 names that appear in phi nodes on abnormal edges, so that we do not
	 create overlapping life ranges for them (PR 27283).  */
      desc = XNEW (class tree_niter_desc);
      if (!number_of_iterations_exit (data->current_loop,
				      exit, desc, true)
	  || contains_abnormal_ssa_name_p (desc->niter))
	{
	  XDELETE (desc);
	  desc = NULL;
	}
      data->niters->put (exit, desc);
    }
  else
    desc = *slot;

  return desc;
}

/* Returns the structure describing number of iterations determined from
   single dominating exit of DATA->current_loop, or NULL if something
   goes wrong.  */

static class tree_niter_desc *
niter_for_single_dom_exit (struct ivopts_data *data)
{
  edge exit = single_dom_exit (data->current_loop);

  if (!exit)
    return NULL;

  return niter_for_exit (data, exit);
}

/* Initializes data structures used by the iv optimization pass, stored
   in DATA.  */

static void
tree_ssa_iv_optimize_init (struct ivopts_data *data)
{
  data->version_info_size = 2 * num_ssa_names;
  data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
  data->relevant = BITMAP_ALLOC (NULL);
  data->important_candidates = BITMAP_ALLOC (NULL);
  data->max_inv_var_id = 0;
  data->max_inv_expr_id = 0;
  data->niters = NULL;
  data->vgroups.create (20);
  data->vcands.create (20);
  data->inv_expr_tab = new hash_table<iv_inv_expr_hasher> (10);
  data->name_expansion_cache = NULL;
  data->base_object_map = NULL;
  data->iv_common_cand_tab = new hash_table<iv_common_cand_hasher> (10);
  data->iv_common_cands.create (20);
  decl_rtl_to_reset.create (20);
  gcc_obstack_init (&data->iv_obstack);
}

/* walk_tree callback for determine_base_object.  */

static tree
determine_base_object_1 (tree *tp, int *walk_subtrees, void *wdata)
{
  tree_code code = TREE_CODE (*tp);
  tree obj = NULL_TREE;
  if (code == ADDR_EXPR)
    {
      tree base = get_base_address (TREE_OPERAND (*tp, 0));
      if (!base)
	obj = *tp;
      else if (TREE_CODE (base) != MEM_REF)
	obj = fold_convert (ptr_type_node, build_fold_addr_expr (base));
    }
  else if (code == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (*tp)))
    obj = fold_convert (ptr_type_node, *tp);

  if (!obj)
    {
      if (!EXPR_P (*tp))
	*walk_subtrees = 0;

      return NULL_TREE;
    }

  /* Record special node for multiple base objects and stop.  */
  if (*static_cast<tree *> (wdata))
    {
      *static_cast<tree *> (wdata) = integer_zero_node;
      return integer_zero_node;
    }

  /* Record the base object and continue looking.  */
  *static_cast<tree *> (wdata) = obj;
  return NULL_TREE;
}

/* Returns a memory object to which EXPR points, with caching.  Return NULL
   if we are able to determine that it does not point to any such object;
   specially return integer_zero_node if EXPR contains multiple base
   objects.  */

static tree
determine_base_object (struct ivopts_data *data, tree expr)
{
  tree *slot, obj = NULL_TREE;
  if (data->base_object_map)
    {
      if ((slot = data->base_object_map->get(expr)) != NULL)
	return *slot;
    }
  else
    data->base_object_map = new hash_map<tree, tree>;

  (void) walk_tree_without_duplicates (&expr, determine_base_object_1, &obj);
  data->base_object_map->put (expr, obj);
  return obj;
}
1148 /* Return true if address expression with non-DECL_P operand appears
1152 contain_complex_addr_expr (tree expr
)
1157 switch (TREE_CODE (expr
))
1159 case POINTER_PLUS_EXPR
:
1162 res
|= contain_complex_addr_expr (TREE_OPERAND (expr
, 0));
1163 res
|= contain_complex_addr_expr (TREE_OPERAND (expr
, 1));
1167 return (!DECL_P (TREE_OPERAND (expr
, 0)));
1176 /* Allocates an induction variable with given initial value BASE and step STEP
1177 for loop LOOP. NO_OVERFLOW implies the iv doesn't overflow. */
1180 alloc_iv (struct ivopts_data
*data
, tree base
, tree step
,
1181 bool no_overflow
= false)
1184 struct iv
*iv
= (struct iv
*) obstack_alloc (&data
->iv_obstack
,
1185 sizeof (struct iv
));
1186 gcc_assert (step
!= NULL_TREE
);
1188 /* Lower address expression in base except ones with DECL_P as operand.
1190 1) More accurate cost can be computed for address expressions;
1191 2) Duplicate candidates won't be created for bases in different
1192 forms, like &a[0] and &a. */
1194 if ((TREE_CODE (expr
) == ADDR_EXPR
&& !DECL_P (TREE_OPERAND (expr
, 0)))
1195 || contain_complex_addr_expr (expr
))
1198 tree_to_aff_combination (expr
, TREE_TYPE (expr
), &comb
);
1199 base
= fold_convert (TREE_TYPE (base
), aff_combination_to_tree (&comb
));
1203 iv
->base_object
= determine_base_object (data
, base
);
1206 iv
->nonlin_use
= NULL
;
1207 iv
->ssa_name
= NULL_TREE
;
1209 && !iv_can_overflow_p (data
->current_loop
, TREE_TYPE (base
),
1212 iv
->no_overflow
= no_overflow
;
1213 iv
->have_address_use
= false;
1218 /* Sets STEP and BASE for induction variable IV. NO_OVERFLOW implies the IV
1219 doesn't overflow. */
1222 set_iv (struct ivopts_data
*data
, tree iv
, tree base
, tree step
,
1225 struct version_info
*info
= name_info (data
, iv
);
1227 gcc_assert (!info
->iv
);
1229 bitmap_set_bit (data
->relevant
, SSA_NAME_VERSION (iv
));
1230 info
->iv
= alloc_iv (data
, base
, step
, no_overflow
);
1231 info
->iv
->ssa_name
= iv
;
1234 /* Finds induction variable declaration for VAR. */
1237 get_iv (struct ivopts_data
*data
, tree var
)
1240 tree type
= TREE_TYPE (var
);
1242 if (!POINTER_TYPE_P (type
)
1243 && !INTEGRAL_TYPE_P (type
))
1246 if (!name_info (data
, var
)->iv
)
1248 bb
= gimple_bb (SSA_NAME_DEF_STMT (var
));
1251 || !flow_bb_inside_loop_p (data
->current_loop
, bb
))
1253 if (POINTER_TYPE_P (type
))
1255 set_iv (data
, var
, var
, build_int_cst (type
, 0), true);
1259 return name_info (data
, var
)->iv
;
1262 /* Return the first non-invariant ssa var found in EXPR. */
1265 extract_single_var_from_expr (tree expr
)
1269 enum tree_code code
;
1271 if (!expr
|| is_gimple_min_invariant (expr
))
1274 code
= TREE_CODE (expr
);
1275 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code
)))
1277 n
= TREE_OPERAND_LENGTH (expr
);
1278 for (i
= 0; i
< n
; i
++)
1280 tmp
= extract_single_var_from_expr (TREE_OPERAND (expr
, i
));
1286 return (TREE_CODE (expr
) == SSA_NAME
) ? expr
: NULL
;
1289 /* Finds basic ivs. */
1292 find_bivs (struct ivopts_data
*data
)
1296 tree step
, type
, base
, stop
;
1298 class loop
*loop
= data
->current_loop
;
1301 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
1305 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi
)))
1308 if (virtual_operand_p (PHI_RESULT (phi
)))
1311 if (!simple_iv (loop
, loop
, PHI_RESULT (phi
), &iv
, true))
1314 if (integer_zerop (iv
.step
))
1318 base
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_preheader_edge (loop
));
1319 /* Stop expanding iv base at the first ssa var referred by iv step.
1320 Ideally we should stop at any ssa var, because that's expensive
1321 and unusual to happen, we just do it on the first one.
1323 See PR64705 for the rationale. */
1324 stop
= extract_single_var_from_expr (step
);
1325 base
= expand_simple_operations (base
, stop
);
1326 if (contains_abnormal_ssa_name_p (base
)
1327 || contains_abnormal_ssa_name_p (step
))
1330 type
= TREE_TYPE (PHI_RESULT (phi
));
1331 base
= fold_convert (type
, base
);
1334 if (POINTER_TYPE_P (type
))
1335 step
= convert_to_ptrofftype (step
);
1337 step
= fold_convert (type
, step
);
1340 set_iv (data
, PHI_RESULT (phi
), base
, step
, iv
.no_overflow
);
1347 /* Marks basic ivs. */
1350 mark_bivs (struct ivopts_data
*data
)
1355 struct iv
*iv
, *incr_iv
;
1356 class loop
*loop
= data
->current_loop
;
1357 basic_block incr_bb
;
1360 data
->bivs_not_used_in_addr
= 0;
1361 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
1365 iv
= get_iv (data
, PHI_RESULT (phi
));
1369 var
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_latch_edge (loop
));
1370 def
= SSA_NAME_DEF_STMT (var
);
1371 /* Don't mark iv peeled from other one as biv. */
1373 && gimple_code (def
) == GIMPLE_PHI
1374 && gimple_bb (def
) == loop
->header
)
1377 incr_iv
= get_iv (data
, var
);
1381 /* If the increment is in the subloop, ignore it. */
1382 incr_bb
= gimple_bb (SSA_NAME_DEF_STMT (var
));
1383 if (incr_bb
->loop_father
!= data
->current_loop
1384 || (incr_bb
->flags
& BB_IRREDUCIBLE_LOOP
))
1388 incr_iv
->biv_p
= true;
1389 if (iv
->no_overflow
)
1390 data
->bivs_not_used_in_addr
++;
1391 if (incr_iv
->no_overflow
)
1392 data
->bivs_not_used_in_addr
++;
1396 /* Checks whether STMT defines a linear induction variable and stores its
1397 parameters to IV. */
1400 find_givs_in_stmt_scev (struct ivopts_data
*data
, gimple
*stmt
, affine_iv
*iv
)
1403 class loop
*loop
= data
->current_loop
;
1405 iv
->base
= NULL_TREE
;
1406 iv
->step
= NULL_TREE
;
1408 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
1411 lhs
= gimple_assign_lhs (stmt
);
1412 if (TREE_CODE (lhs
) != SSA_NAME
)
1415 if (!simple_iv (loop
, loop_containing_stmt (stmt
), lhs
, iv
, true))
1418 /* Stop expanding iv base at the first ssa var referred by iv step.
1419 Ideally we should stop at any ssa var, because that's expensive
1420 and unusual to happen, we just do it on the first one.
1422 See PR64705 for the rationale. */
1423 stop
= extract_single_var_from_expr (iv
->step
);
1424 iv
->base
= expand_simple_operations (iv
->base
, stop
);
1425 if (contains_abnormal_ssa_name_p (iv
->base
)
1426 || contains_abnormal_ssa_name_p (iv
->step
))
1429 /* If STMT could throw, then do not consider STMT as defining a GIV.
1430 While this will suppress optimizations, we cannot safely delete this
1431 GIV and associated statements, even if it appears it is not used. */
1432 if (stmt_could_throw_p (cfun
, stmt
))
1438 /* Finds general ivs in statement STMT. */
1441 find_givs_in_stmt (struct ivopts_data
*data
, gimple
*stmt
)
1445 if (!find_givs_in_stmt_scev (data
, stmt
, &iv
))
1448 set_iv (data
, gimple_assign_lhs (stmt
), iv
.base
, iv
.step
, iv
.no_overflow
);
1451 /* Finds general ivs in basic block BB. */
1454 find_givs_in_bb (struct ivopts_data
*data
, basic_block bb
)
1456 gimple_stmt_iterator bsi
;
1458 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
1459 find_givs_in_stmt (data
, gsi_stmt (bsi
));
1462 /* Finds general ivs. */
1465 find_givs (struct ivopts_data
*data
)
1467 class loop
*loop
= data
->current_loop
;
1468 basic_block
*body
= get_loop_body_in_dom_order (loop
);
1471 for (i
= 0; i
< loop
->num_nodes
; i
++)
1472 find_givs_in_bb (data
, body
[i
]);
1476 /* For each ssa name defined in LOOP determines whether it is an induction
1477 variable and if so, its initial value and step. */
1480 find_induction_variables (struct ivopts_data
*data
)
1485 if (!find_bivs (data
))
1491 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1493 class tree_niter_desc
*niter
= niter_for_single_dom_exit (data
);
1497 fprintf (dump_file
, " number of iterations ");
1498 print_generic_expr (dump_file
, niter
->niter
, TDF_SLIM
);
1499 if (!integer_zerop (niter
->may_be_zero
))
1501 fprintf (dump_file
, "; zero if ");
1502 print_generic_expr (dump_file
, niter
->may_be_zero
, TDF_SLIM
);
1504 fprintf (dump_file
, "\n");
1507 fprintf (dump_file
, "\n<Induction Vars>:\n");
1508 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
1510 struct version_info
*info
= ver_info (data
, i
);
1511 if (info
->iv
&& info
->iv
->step
&& !integer_zerop (info
->iv
->step
))
1512 dump_iv (dump_file
, ver_info (data
, i
)->iv
, true, 0);
1519 /* Records a use of TYPE at *USE_P in STMT whose value is IV in GROUP.
1520 For address type use, ADDR_BASE is the stripped IV base, ADDR_OFFSET
1521 is the const offset stripped from IV base and MEM_TYPE is the type
1522 of the memory being addressed. For uses of other types, ADDR_BASE
1523 and ADDR_OFFSET are zero by default and MEM_TYPE is NULL_TREE. */
1525 static struct iv_use
*
1526 record_use (struct iv_group
*group
, tree
*use_p
, struct iv
*iv
,
1527 gimple
*stmt
, enum use_type type
, tree mem_type
,
1528 tree addr_base
, poly_uint64 addr_offset
)
1530 struct iv_use
*use
= XCNEW (struct iv_use
);
1532 use
->id
= group
->vuses
.length ();
1533 use
->group_id
= group
->id
;
1535 use
->mem_type
= mem_type
;
1539 use
->addr_base
= addr_base
;
1540 use
->addr_offset
= addr_offset
;
1542 group
->vuses
.safe_push (use
);
1546 /* Checks whether OP is a loop-level invariant and if so, records it.
1547 NONLINEAR_USE is true if the invariant is used in a way we do not
1548 handle specially. */
1551 record_invariant (struct ivopts_data
*data
, tree op
, bool nonlinear_use
)
1554 struct version_info
*info
;
1556 if (TREE_CODE (op
) != SSA_NAME
1557 || virtual_operand_p (op
))
1560 bb
= gimple_bb (SSA_NAME_DEF_STMT (op
));
1562 && flow_bb_inside_loop_p (data
->current_loop
, bb
))
1565 info
= name_info (data
, op
);
1567 info
->has_nonlin_use
|= nonlinear_use
;
1569 info
->inv_id
= ++data
->max_inv_var_id
;
1570 bitmap_set_bit (data
->relevant
, SSA_NAME_VERSION (op
));
1573 /* Record a group of TYPE. */
1575 static struct iv_group
*
1576 record_group (struct ivopts_data
*data
, enum use_type type
)
1578 struct iv_group
*group
= XCNEW (struct iv_group
);
1580 group
->id
= data
->vgroups
.length ();
1582 group
->related_cands
= BITMAP_ALLOC (NULL
);
1583 group
->vuses
.create (1);
1584 group
->doloop_p
= false;
1586 data
->vgroups
.safe_push (group
);
1590 /* Record a use of TYPE at *USE_P in STMT whose value is IV in a group.
1591 New group will be created if there is no existing group for the use.
1592 MEM_TYPE is the type of memory being addressed, or NULL if this
1593 isn't an address reference. */
1595 static struct iv_use
*
1596 record_group_use (struct ivopts_data
*data
, tree
*use_p
,
1597 struct iv
*iv
, gimple
*stmt
, enum use_type type
,
1600 tree addr_base
= NULL
;
1601 struct iv_group
*group
= NULL
;
1602 poly_uint64 addr_offset
= 0;
1604 /* Record non address type use in a new group. */
1605 if (address_p (type
))
1609 addr_base
= strip_offset (iv
->base
, &addr_offset
);
1610 for (i
= 0; i
< data
->vgroups
.length (); i
++)
1614 group
= data
->vgroups
[i
];
1615 use
= group
->vuses
[0];
1616 if (!address_p (use
->type
))
1619 /* Check if it has the same stripped base and step. */
1620 if (operand_equal_p (iv
->base_object
, use
->iv
->base_object
, 0)
1621 && operand_equal_p (iv
->step
, use
->iv
->step
, 0)
1622 && operand_equal_p (addr_base
, use
->addr_base
, 0))
1625 if (i
== data
->vgroups
.length ())
1630 group
= record_group (data
, type
);
1632 return record_use (group
, use_p
, iv
, stmt
, type
, mem_type
,
1633 addr_base
, addr_offset
);
1636 /* Checks whether the use OP is interesting and if so, records it. */
1638 static struct iv_use
*
1639 find_interesting_uses_op (struct ivopts_data
*data
, tree op
)
1645 if (TREE_CODE (op
) != SSA_NAME
)
1648 iv
= get_iv (data
, op
);
1654 gcc_assert (iv
->nonlin_use
->type
== USE_NONLINEAR_EXPR
);
1655 return iv
->nonlin_use
;
1658 if (integer_zerop (iv
->step
))
1660 record_invariant (data
, op
, true);
1664 stmt
= SSA_NAME_DEF_STMT (op
);
1665 gcc_assert (gimple_code (stmt
) == GIMPLE_PHI
|| is_gimple_assign (stmt
));
1667 use
= record_group_use (data
, NULL
, iv
, stmt
, USE_NONLINEAR_EXPR
, NULL_TREE
);
1668 iv
->nonlin_use
= use
;
1672 /* Indicate how compare type iv_use can be handled. */
1673 enum comp_iv_rewrite
1676 /* We may rewrite compare type iv_use by expressing value of the iv_use. */
1678 /* We may rewrite compare type iv_uses on both sides of comparison by
1679 expressing value of each iv_use. */
1681 /* We may rewrite compare type iv_use by expressing value of the iv_use
1682 or by eliminating it with other iv_cand. */
1686 /* Given a condition in statement STMT, checks whether it is a compare
1687 of an induction variable and an invariant. If this is the case,
1688 CONTROL_VAR is set to location of the iv, BOUND to the location of
1689 the invariant, IV_VAR and IV_BOUND are set to the corresponding
1690 induction variable descriptions, and true is returned. If this is not
1691 the case, CONTROL_VAR and BOUND are set to the arguments of the
1692 condition and false is returned. */
1694 static enum comp_iv_rewrite
1695 extract_cond_operands (struct ivopts_data
*data
, gimple
*stmt
,
1696 tree
**control_var
, tree
**bound
,
1697 struct iv
**iv_var
, struct iv
**iv_bound
)
1699 /* The objects returned when COND has constant operands. */
1700 static struct iv const_iv
;
1702 tree
*op0
= &zero
, *op1
= &zero
;
1703 struct iv
*iv0
= &const_iv
, *iv1
= &const_iv
;
1704 enum comp_iv_rewrite rewrite_type
= COMP_IV_NA
;
1706 if (gimple_code (stmt
) == GIMPLE_COND
)
1708 gcond
*cond_stmt
= as_a
<gcond
*> (stmt
);
1709 op0
= gimple_cond_lhs_ptr (cond_stmt
);
1710 op1
= gimple_cond_rhs_ptr (cond_stmt
);
1714 op0
= gimple_assign_rhs1_ptr (stmt
);
1715 op1
= gimple_assign_rhs2_ptr (stmt
);
1718 zero
= integer_zero_node
;
1719 const_iv
.step
= integer_zero_node
;
1721 if (TREE_CODE (*op0
) == SSA_NAME
)
1722 iv0
= get_iv (data
, *op0
);
1723 if (TREE_CODE (*op1
) == SSA_NAME
)
1724 iv1
= get_iv (data
, *op1
);
1726 /* If both sides of comparison are IVs. We can express ivs on both end. */
1727 if (iv0
&& iv1
&& !integer_zerop (iv0
->step
) && !integer_zerop (iv1
->step
))
1729 rewrite_type
= COMP_IV_EXPR_2
;
1733 /* If none side of comparison is IV. */
1734 if ((!iv0
|| integer_zerop (iv0
->step
))
1735 && (!iv1
|| integer_zerop (iv1
->step
)))
1738 /* Control variable may be on the other side. */
1739 if (!iv0
|| integer_zerop (iv0
->step
))
1741 std::swap (op0
, op1
);
1742 std::swap (iv0
, iv1
);
1744 /* If one side is IV and the other side isn't loop invariant. */
1746 rewrite_type
= COMP_IV_EXPR
;
1747 /* If one side is IV and the other side is loop invariant. */
1748 else if (!integer_zerop (iv0
->step
) && integer_zerop (iv1
->step
))
1749 rewrite_type
= COMP_IV_ELIM
;
1761 return rewrite_type
;
1764 /* Checks whether the condition in STMT is interesting and if so,
1768 find_interesting_uses_cond (struct ivopts_data
*data
, gimple
*stmt
)
1770 tree
*var_p
, *bound_p
;
1771 struct iv
*var_iv
, *bound_iv
;
1772 enum comp_iv_rewrite ret
;
1774 ret
= extract_cond_operands (data
, stmt
,
1775 &var_p
, &bound_p
, &var_iv
, &bound_iv
);
1776 if (ret
== COMP_IV_NA
)
1778 find_interesting_uses_op (data
, *var_p
);
1779 find_interesting_uses_op (data
, *bound_p
);
1783 record_group_use (data
, var_p
, var_iv
, stmt
, USE_COMPARE
, NULL_TREE
);
1784 /* Record compare type iv_use for iv on the other side of comparison. */
1785 if (ret
== COMP_IV_EXPR_2
)
1786 record_group_use (data
, bound_p
, bound_iv
, stmt
, USE_COMPARE
, NULL_TREE
);
1789 /* Returns the outermost loop EXPR is obviously invariant in
1790 relative to the loop LOOP, i.e. if all its operands are defined
1791 outside of the returned loop. Returns NULL if EXPR is not
1792 even obviously invariant in LOOP. */
1795 outermost_invariant_loop_for_expr (class loop
*loop
, tree expr
)
1800 if (is_gimple_min_invariant (expr
))
1801 return current_loops
->tree_root
;
1803 if (TREE_CODE (expr
) == SSA_NAME
)
1805 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (expr
));
1808 if (flow_bb_inside_loop_p (loop
, def_bb
))
1810 return superloop_at_depth (loop
,
1811 loop_depth (def_bb
->loop_father
) + 1);
1814 return current_loops
->tree_root
;
1820 unsigned maxdepth
= 0;
1821 len
= TREE_OPERAND_LENGTH (expr
);
1822 for (i
= 0; i
< len
; i
++)
1825 if (!TREE_OPERAND (expr
, i
))
1828 ivloop
= outermost_invariant_loop_for_expr (loop
, TREE_OPERAND (expr
, i
));
1831 maxdepth
= MAX (maxdepth
, loop_depth (ivloop
));
1834 return superloop_at_depth (loop
, maxdepth
);
1837 /* Returns true if expression EXPR is obviously invariant in LOOP,
1838 i.e. if all its operands are defined outside of the LOOP. LOOP
1839 should not be the function body. */
1842 expr_invariant_in_loop_p (class loop
*loop
, tree expr
)
1847 gcc_assert (loop_depth (loop
) > 0);
1849 if (is_gimple_min_invariant (expr
))
1852 if (TREE_CODE (expr
) == SSA_NAME
)
1854 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (expr
));
1856 && flow_bb_inside_loop_p (loop
, def_bb
))
1865 len
= TREE_OPERAND_LENGTH (expr
);
1866 for (i
= 0; i
< len
; i
++)
1867 if (TREE_OPERAND (expr
, i
)
1868 && !expr_invariant_in_loop_p (loop
, TREE_OPERAND (expr
, i
)))
1874 /* Given expression EXPR which computes inductive values with respect
1875 to loop recorded in DATA, this function returns biv from which EXPR
1876 is derived by tracing definition chains of ssa variables in EXPR. */
1879 find_deriving_biv_for_expr (struct ivopts_data
*data
, tree expr
)
1884 enum tree_code code
;
1887 if (expr
== NULL_TREE
)
1890 if (is_gimple_min_invariant (expr
))
1893 code
= TREE_CODE (expr
);
1894 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code
)))
1896 n
= TREE_OPERAND_LENGTH (expr
);
1897 for (i
= 0; i
< n
; i
++)
1899 iv
= find_deriving_biv_for_expr (data
, TREE_OPERAND (expr
, i
));
1905 /* Stop if it's not ssa name. */
1906 if (code
!= SSA_NAME
)
1909 iv
= get_iv (data
, expr
);
1910 if (!iv
|| integer_zerop (iv
->step
))
1915 stmt
= SSA_NAME_DEF_STMT (expr
);
1916 if (gphi
*phi
= dyn_cast
<gphi
*> (stmt
))
1919 use_operand_p use_p
;
1920 basic_block phi_bb
= gimple_bb (phi
);
1922 /* Skip loop header PHI that doesn't define biv. */
1923 if (phi_bb
->loop_father
== data
->current_loop
)
1926 if (virtual_operand_p (gimple_phi_result (phi
)))
1929 FOR_EACH_PHI_ARG (use_p
, phi
, iter
, SSA_OP_USE
)
1931 tree use
= USE_FROM_PTR (use_p
);
1932 iv
= find_deriving_biv_for_expr (data
, use
);
1938 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
1941 e1
= gimple_assign_rhs1 (stmt
);
1942 code
= gimple_assign_rhs_code (stmt
);
1943 if (get_gimple_rhs_class (code
) == GIMPLE_SINGLE_RHS
)
1944 return find_deriving_biv_for_expr (data
, e1
);
1951 case POINTER_PLUS_EXPR
:
1952 /* Increments, decrements and multiplications by a constant
1954 e2
= gimple_assign_rhs2 (stmt
);
1955 iv
= find_deriving_biv_for_expr (data
, e2
);
1961 /* Casts are simple. */
1962 return find_deriving_biv_for_expr (data
, e1
);
1971 /* Record BIV, its predecessor and successor that they are used in
1972 address type uses. */
1975 record_biv_for_address_use (struct ivopts_data
*data
, struct iv
*biv
)
1978 tree type
, base_1
, base_2
;
1981 if (!biv
|| !biv
->biv_p
|| integer_zerop (biv
->step
)
1982 || biv
->have_address_use
|| !biv
->no_overflow
)
1985 type
= TREE_TYPE (biv
->base
);
1986 if (!INTEGRAL_TYPE_P (type
))
1989 biv
->have_address_use
= true;
1990 data
->bivs_not_used_in_addr
--;
1991 base_1
= fold_build2 (PLUS_EXPR
, type
, biv
->base
, biv
->step
);
1992 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
1994 struct iv
*iv
= ver_info (data
, i
)->iv
;
1996 if (!iv
|| !iv
->biv_p
|| integer_zerop (iv
->step
)
1997 || iv
->have_address_use
|| !iv
->no_overflow
)
2000 if (type
!= TREE_TYPE (iv
->base
)
2001 || !INTEGRAL_TYPE_P (TREE_TYPE (iv
->base
)))
2004 if (!operand_equal_p (biv
->step
, iv
->step
, 0))
2007 base_2
= fold_build2 (PLUS_EXPR
, type
, iv
->base
, iv
->step
);
2008 if (operand_equal_p (base_1
, iv
->base
, 0)
2009 || operand_equal_p (base_2
, biv
->base
, 0))
2011 iv
->have_address_use
= true;
2012 data
->bivs_not_used_in_addr
--;
2017 /* Cumulates the steps of indices into DATA and replaces their values with the
2018 initial ones. Returns false when the value of the index cannot be determined.
2019 Callback for for_each_index. */
2021 struct ifs_ivopts_data
2023 struct ivopts_data
*ivopts_data
;
2029 idx_find_step (tree base
, tree
*idx
, void *data
)
2031 struct ifs_ivopts_data
*dta
= (struct ifs_ivopts_data
*) data
;
2033 bool use_overflow_semantics
= false;
2034 tree step
, iv_base
, iv_step
, lbound
, off
;
2035 class loop
*loop
= dta
->ivopts_data
->current_loop
;
2037 /* If base is a component ref, require that the offset of the reference
2039 if (TREE_CODE (base
) == COMPONENT_REF
)
2041 off
= component_ref_field_offset (base
);
2042 return expr_invariant_in_loop_p (loop
, off
);
2045 /* If base is array, first check whether we will be able to move the
2046 reference out of the loop (in order to take its address in strength
2047 reduction). In order for this to work we need both lower bound
2048 and step to be loop invariants. */
2049 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
2051 /* Moreover, for a range, the size needs to be invariant as well. */
2052 if (TREE_CODE (base
) == ARRAY_RANGE_REF
2053 && !expr_invariant_in_loop_p (loop
, TYPE_SIZE (TREE_TYPE (base
))))
2056 step
= array_ref_element_size (base
);
2057 lbound
= array_ref_low_bound (base
);
2059 if (!expr_invariant_in_loop_p (loop
, step
)
2060 || !expr_invariant_in_loop_p (loop
, lbound
))
2064 if (TREE_CODE (*idx
) != SSA_NAME
)
2067 iv
= get_iv (dta
->ivopts_data
, *idx
);
2071 /* XXX We produce for a base of *D42 with iv->base being &x[0]
2072 *&x[0], which is not folded and does not trigger the
2073 ARRAY_REF path below. */
2076 if (integer_zerop (iv
->step
))
2079 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
2081 step
= array_ref_element_size (base
);
2083 /* We only handle addresses whose step is an integer constant. */
2084 if (TREE_CODE (step
) != INTEGER_CST
)
2088 /* The step for pointer arithmetics already is 1 byte. */
2089 step
= size_one_node
;
2093 if (iv
->no_overflow
&& nowrap_type_p (TREE_TYPE (iv_step
)))
2094 use_overflow_semantics
= true;
2096 if (!convert_affine_scev (dta
->ivopts_data
->current_loop
,
2097 sizetype
, &iv_base
, &iv_step
, dta
->stmt
,
2098 use_overflow_semantics
))
2100 /* The index might wrap. */
2104 step
= fold_build2 (MULT_EXPR
, sizetype
, step
, iv_step
);
2105 dta
->step
= fold_build2 (PLUS_EXPR
, sizetype
, dta
->step
, step
);
2107 if (dta
->ivopts_data
->bivs_not_used_in_addr
)
2110 iv
= find_deriving_biv_for_expr (dta
->ivopts_data
, iv
->ssa_name
);
2112 record_biv_for_address_use (dta
->ivopts_data
, iv
);
2117 /* Records use in index IDX. Callback for for_each_index. Ivopts data
2118 object is passed to it in DATA. */
2121 idx_record_use (tree base
, tree
*idx
,
2124 struct ivopts_data
*data
= (struct ivopts_data
*) vdata
;
2125 find_interesting_uses_op (data
, *idx
);
2126 if (TREE_CODE (base
) == ARRAY_REF
|| TREE_CODE (base
) == ARRAY_RANGE_REF
)
2128 find_interesting_uses_op (data
, array_ref_element_size (base
));
2129 find_interesting_uses_op (data
, array_ref_low_bound (base
));
2134 /* If we can prove that TOP = cst * BOT for some constant cst,
2135 store cst to MUL and return true. Otherwise return false.
2136 The returned value is always sign-extended, regardless of the
2137 signedness of TOP and BOT. */
2140 constant_multiple_of (tree top
, tree bot
, widest_int
*mul
)
2143 enum tree_code code
;
2144 unsigned precision
= TYPE_PRECISION (TREE_TYPE (top
));
2145 widest_int res
, p0
, p1
;
2150 if (operand_equal_p (top
, bot
, 0))
2156 code
= TREE_CODE (top
);
2160 mby
= TREE_OPERAND (top
, 1);
2161 if (TREE_CODE (mby
) != INTEGER_CST
)
2164 if (!constant_multiple_of (TREE_OPERAND (top
, 0), bot
, &res
))
2167 *mul
= wi::sext (res
* wi::to_widest (mby
), precision
);
2172 if (!constant_multiple_of (TREE_OPERAND (top
, 0), bot
, &p0
)
2173 || !constant_multiple_of (TREE_OPERAND (top
, 1), bot
, &p1
))
2176 if (code
== MINUS_EXPR
)
2178 *mul
= wi::sext (p0
+ p1
, precision
);
2182 if (TREE_CODE (bot
) != INTEGER_CST
)
2185 p0
= widest_int::from (wi::to_wide (top
), SIGNED
);
2186 p1
= widest_int::from (wi::to_wide (bot
), SIGNED
);
2189 *mul
= wi::sext (wi::divmod_trunc (p0
, p1
, SIGNED
, &res
), precision
);
2193 if (POLY_INT_CST_P (top
)
2194 && POLY_INT_CST_P (bot
)
2195 && constant_multiple_p (wi::to_poly_widest (top
),
2196 wi::to_poly_widest (bot
), mul
))
2203 /* Return true if memory reference REF with step STEP may be unaligned. */
2206 may_be_unaligned_p (tree ref
, tree step
)
2208 /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
2209 thus they are not misaligned. */
2210 if (TREE_CODE (ref
) == TARGET_MEM_REF
)
2213 unsigned int align
= TYPE_ALIGN (TREE_TYPE (ref
));
2214 if (GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref
))) > align
)
2215 align
= GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref
)));
2217 unsigned HOST_WIDE_INT bitpos
;
2218 unsigned int ref_align
;
2219 get_object_alignment_1 (ref
, &ref_align
, &bitpos
);
2220 if (ref_align
< align
2221 || (bitpos
% align
) != 0
2222 || (bitpos
% BITS_PER_UNIT
) != 0)
2225 unsigned int trailing_zeros
= tree_ctz (step
);
2226 if (trailing_zeros
< HOST_BITS_PER_INT
2227 && (1U << trailing_zeros
) * BITS_PER_UNIT
< align
)
2233 /* Return true if EXPR may be non-addressable. */
2236 may_be_nonaddressable_p (tree expr
)
2238 switch (TREE_CODE (expr
))
2241 /* Check if it's a register variable. */
2242 return DECL_HARD_REGISTER (expr
);
2244 case TARGET_MEM_REF
:
2245 /* TARGET_MEM_REFs are translated directly to valid MEMs on the
2246 target, thus they are always addressable. */
2250 /* Likewise for MEM_REFs, modulo the storage order. */
2251 return REF_REVERSE_STORAGE_ORDER (expr
);
2254 if (REF_REVERSE_STORAGE_ORDER (expr
))
2256 return may_be_nonaddressable_p (TREE_OPERAND (expr
, 0));
2259 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr
, 0))))
2261 return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr
, 1))
2262 || may_be_nonaddressable_p (TREE_OPERAND (expr
, 0));
2265 case ARRAY_RANGE_REF
:
2266 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr
, 0))))
2268 return may_be_nonaddressable_p (TREE_OPERAND (expr
, 0));
2270 case VIEW_CONVERT_EXPR
:
2271 /* This kind of view-conversions may wrap non-addressable objects
2272 and make them look addressable. After some processing the
2273 non-addressability may be uncovered again, causing ADDR_EXPRs
2274 of inappropriate objects to be built. */
2275 if (is_gimple_reg (TREE_OPERAND (expr
, 0))
2276 || !is_gimple_addressable (TREE_OPERAND (expr
, 0)))
2278 return may_be_nonaddressable_p (TREE_OPERAND (expr
, 0));
/* Finds addresses in *OP_P inside STMT.  */

find_interesting_uses_address (struct ivopts_data *data, gimple *stmt,
  tree base = *op_p, step = size_zero_node;
  struct ifs_ivopts_data ifs_ivopts_data;

  /* Do not play with volatile memory references.  A bit too conservative,
     perhaps, but safe.  */
  if (gimple_has_volatile_ops (stmt))

  /* Ignore bitfields for now.  Not really something terribly complicated
  if (TREE_CODE (base) == BIT_FIELD_REF)

  base = unshare_expr (base);

  if (TREE_CODE (base) == TARGET_MEM_REF)
      tree type = build_pointer_type (TREE_TYPE (base));
	  && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
	  civ = get_iv (data, TMR_BASE (base));
	  TMR_BASE (base) = civ->base;
      if (TMR_INDEX2 (base)
	  && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
	  civ = get_iv (data, TMR_INDEX2 (base));
	  TMR_INDEX2 (base) = civ->base;
      if (TMR_INDEX (base)
	  && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
	  civ = get_iv (data, TMR_INDEX (base));
	  TMR_INDEX (base) = civ->base;
	  if (TMR_STEP (base))
	    astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);
	  step = fold_build2 (PLUS_EXPR, type, step, astep);
      if (integer_zerop (step))
      base = tree_mem_ref_addr (type, base);

      ifs_ivopts_data.ivopts_data = data;
      ifs_ivopts_data.stmt = stmt;
      ifs_ivopts_data.step = size_zero_node;
      if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
	  || integer_zerop (ifs_ivopts_data.step))
      step = ifs_ivopts_data.step;

  /* Check that the base expression is addressable.  This needs
     to be done after substituting bases of IVs into it.  */
  if (may_be_nonaddressable_p (base))

  /* Moreover, on strict alignment platforms, check that it is
     sufficiently aligned.  */
  if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))

  base = build_fold_addr_expr (base);

  /* Substituting bases of IVs into the base expression might
     have caused folding opportunities.  */
  if (TREE_CODE (base) == ADDR_EXPR)
      tree *ref = &TREE_OPERAND (base, 0);
      while (handled_component_p (*ref))
	ref = &TREE_OPERAND (*ref, 0);
      if (TREE_CODE (*ref) == MEM_REF)
	  tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
				  TREE_OPERAND (*ref, 0),
				  TREE_OPERAND (*ref, 1));

  civ = alloc_iv (data, base, step);
  /* Fail if base object of this memory reference is unknown.  */
  if (civ->base_object == NULL_TREE)

  record_group_use (data, op_p, civ, stmt, USE_REF_ADDRESS, TREE_TYPE (*op_p));

  for_each_index (op_p, idx_record_use, data);
/* Finds and records invariants used in STMT.  */

find_invariants_stmt (struct ivopts_data *data, gimple *stmt)
  use_operand_p use_p;

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
      op = USE_FROM_PTR (use_p);
      record_invariant (data, op, false);
/* CALL calls an internal function.  If operand *OP_P will become an
   address when the call is expanded, return the type of the memory
   being addressed, otherwise return null.  */

get_mem_type_for_internal_fn (gcall *call, tree *op_p)
  switch (gimple_call_internal_fn (call))
    case IFN_MASK_LOAD_LANES:
      if (op_p == gimple_call_arg_ptr (call, 0))
	return TREE_TYPE (gimple_call_lhs (call));

    case IFN_MASK_STORE:
    case IFN_MASK_STORE_LANES:
      if (op_p == gimple_call_arg_ptr (call, 0))
	return TREE_TYPE (gimple_call_arg (call, 3));
/* IV is a (non-address) iv that describes operand *OP_P of STMT.
   Return true if the operand will become an address when STMT
   is expanded and record the associated address use if so.  */

find_address_like_use (struct ivopts_data *data, gimple *stmt, tree *op_p,
  /* Fail if base object of this memory reference is unknown.  */
  if (iv->base_object == NULL_TREE)

  tree mem_type = NULL_TREE;
  if (gcall *call = dyn_cast <gcall *> (stmt))
    if (gimple_call_internal_p (call))
      mem_type = get_mem_type_for_internal_fn (call, op_p);

      iv = alloc_iv (data, iv->base, iv->step);
      record_group_use (data, op_p, iv, stmt, USE_PTR_ADDRESS, mem_type);
/* Finds interesting uses of induction variables in the statement STMT.  */

find_interesting_uses_stmt (struct ivopts_data *data, gimple *stmt)
  tree op, *lhs, *rhs;
  use_operand_p use_p;
  enum tree_code code;

  find_invariants_stmt (data, stmt);

  if (gimple_code (stmt) == GIMPLE_COND)
      find_interesting_uses_cond (data, stmt);

  if (is_gimple_assign (stmt))
      lhs = gimple_assign_lhs_ptr (stmt);
      rhs = gimple_assign_rhs1_ptr (stmt);

      if (TREE_CODE (*lhs) == SSA_NAME)
	  /* If the statement defines an induction variable, the uses are not
	     interesting by themselves.  */
	  iv = get_iv (data, *lhs);
	  if (iv && !integer_zerop (iv->step))

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	  && (REFERENCE_CLASS_P (*rhs)
	      || is_gimple_val (*rhs)))
	  if (REFERENCE_CLASS_P (*rhs))
	    find_interesting_uses_address (data, stmt, rhs);
	    find_interesting_uses_op (data, *rhs);

	  if (REFERENCE_CLASS_P (*lhs))
	    find_interesting_uses_address (data, stmt, lhs);
      else if (TREE_CODE_CLASS (code) == tcc_comparison)
	  find_interesting_uses_cond (data, stmt);

      /* TODO -- we should also handle address uses of type

	 memory = call (whatever);

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_bb (stmt) == data->current_loop->header)
      iv = get_iv (data, PHI_RESULT (stmt));
      if (iv && !integer_zerop (iv->step))

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
      op = USE_FROM_PTR (use_p);

      if (TREE_CODE (op) != SSA_NAME)

      iv = get_iv (data, op);
      if (!find_address_like_use (data, stmt, use_p->use, iv))
	find_interesting_uses_op (data, op);
/* Finds interesting uses of induction variables outside of loops
   on loop exit edge EXIT.  */

find_interesting_uses_outside (struct ivopts_data *data, edge exit)
  for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
      def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
      if (!virtual_operand_p (def))
	find_interesting_uses_op (data, def);
/* Return TRUE if OFFSET is within the range of [base + offset] addressing
   mode for memory reference represented by USE.  */

static GTY (()) vec<rtx, va_gc> *addr_list;

addr_offset_valid_p (struct iv_use *use, poly_int64 offset)
  unsigned list_index;
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
  machine_mode addr_mode, mem_mode = TYPE_MODE (use->mem_type);

  list_index = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
  if (list_index >= vec_safe_length (addr_list))
    vec_safe_grow_cleared (addr_list, list_index + MAX_MACHINE_MODE, true);

  addr = (*addr_list)[list_index];
      addr_mode = targetm.addr_space.address_mode (as);
      reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
      addr = gen_rtx_fmt_ee (PLUS, addr_mode, reg, NULL_RTX);
      (*addr_list)[list_index] = addr;
    addr_mode = GET_MODE (addr);

  XEXP (addr, 1) = gen_int_mode (offset, addr_mode);
  return (memory_address_addr_space_p (mem_mode, addr, as));
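/* Illustrative note (not part of the original source): validity is probed by
   building a scratch (plus (reg) (const_int OFFSET)) address once per
   (address space, memory mode) pair, caching it in addr_list, and asking
   memory_address_addr_space_p whether the target accepts that form.  On a
   typical target a small displacement such as [reg + 8] is accepted while a
   very large one such as [reg + (1 << 20)] is rejected; that answer is what
   drives the group splitting below.  */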
/* Comparison function to sort group in ascending order of addr_offset.  */

group_compare_offset (const void *a, const void *b)
  const struct iv_use *const *u1 = (const struct iv_use *const *) a;
  const struct iv_use *const *u2 = (const struct iv_use *const *) b;

  return compare_sizes_for_sort ((*u1)->addr_offset, (*u2)->addr_offset);
/* Check if small groups should be split.  Return true if no group
   contains more than two uses with distinct addr_offsets.  Return
   false otherwise.  We want to split such groups because:

     1) Small groups don't have much benefit and may interfere with
	general candidate selection.
     2) Size for problem with only small groups is usually small and
	general algorithm can handle it well.

   TODO -- Above claim may not hold when we want to merge memory
   accesses with consecutive addresses.  */

split_small_address_groups_p (struct ivopts_data *data)
  unsigned int i, j, distinct = 1;
  struct iv_group *group;

  for (i = 0; i < data->vgroups.length (); i++)
      group = data->vgroups[i];
      if (group->vuses.length () == 1)

      gcc_assert (address_p (group->type));
      if (group->vuses.length () == 2)
	  if (compare_sizes_for_sort (group->vuses[0]->addr_offset,
				      group->vuses[1]->addr_offset) > 0)
	    std::swap (group->vuses[0], group->vuses[1]);
	group->vuses.qsort (group_compare_offset);

      for (pre = group->vuses[0], j = 1; j < group->vuses.length (); j++)
	  if (maybe_ne (group->vuses[j]->addr_offset, pre->addr_offset))
	  pre = group->vuses[j];

  return (distinct <= 2);
/* For each group of address type uses, this function further groups
   these uses according to the maximum offset supported by target's
   [base + offset] addressing mode.  */

split_address_groups (struct ivopts_data *data)
  /* Always split group.  */
  bool split_p = split_small_address_groups_p (data);

  for (i = 0; i < data->vgroups.length (); i++)
      struct iv_group *new_group = NULL;
      struct iv_group *group = data->vgroups[i];
      struct iv_use *use = group->vuses[0];

      use->group_id = group->id;
      if (group->vuses.length () == 1)

      gcc_assert (address_p (use->type));

      for (j = 1; j < group->vuses.length ();)
	  struct iv_use *next = group->vuses[j];
	  poly_int64 offset = next->addr_offset - use->addr_offset;

	  /* Split group if asked to, or the offset against the first
	     use can't fit in offset part of addressing mode.  IV uses
	     having the same offset are still kept in one group.  */
	  if (maybe_ne (offset, 0)
	      && (split_p || !addr_offset_valid_p (use, offset)))
	      new_group = record_group (data, group->type);
	      group->vuses.ordered_remove (j);
	      new_group->vuses.safe_push (next);

	  next->group_id = group->id;
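/* Illustrative note (not part of the original source): with uses at offsets
   base, base + 8 and base + 65536 in one group, the first two normally stay
   together because addr_offset_valid_p (use, 8) holds, while the last one is
   moved into a new group whenever 65536 does not fit the [base + offset]
   addressing mode (or whenever split_p forces splitting).  The exact offsets
   accepted are target dependent.  */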
/* Finds uses of the induction variables that are interesting.  */

find_interesting_uses (struct ivopts_data *data)
  gimple_stmt_iterator bsi;
  basic_block *body = get_loop_body (data->current_loop);

  for (i = 0; i < data->current_loop->num_nodes; i++)
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && !flow_bb_inside_loop_p (data->current_loop, e->dest))
	  find_interesting_uses_outside (data, e);

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	find_interesting_uses_stmt (data, gsi_stmt (bsi));
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	if (!is_gimple_debug (gsi_stmt (bsi)))
	  find_interesting_uses_stmt (data, gsi_stmt (bsi));

  split_address_groups (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
      fprintf (dump_file, "\n<IV Groups>:\n");
      dump_groups (dump_file, data);
      fprintf (dump_file, "\n");
/* Strips constant offsets from EXPR and stores them to OFFSET.  If INSIDE_ADDR
   is true, assume we are inside an address.  If TOP_COMPREF is true, assume
   we are at the top-level of the processed address.  */

strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
  tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
  enum tree_code code;
  tree type, orig_type = TREE_TYPE (expr);
  poly_int64 off0, off1;

  tree orig_expr = expr;

  type = TREE_TYPE (expr);
  code = TREE_CODE (expr);

    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      op0 = strip_offset_1 (op0, false, false, &off0);
      op1 = strip_offset_1 (op1, false, false, &off1);

      *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
      if (op0 == TREE_OPERAND (expr, 0)
	  && op1 == TREE_OPERAND (expr, 1))

      if (integer_zerop (op1))
      else if (integer_zerop (op0))
	  if (code == MINUS_EXPR)
	    expr = fold_build1 (NEGATE_EXPR, type, op1);
	expr = fold_build2 (code, type, op0, op1);

      return fold_convert (orig_type, expr);

      op1 = TREE_OPERAND (expr, 1);
      if (!cst_and_fits_in_hwi (op1))

      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, false, false, &off0);
      if (op0 == TREE_OPERAND (expr, 0))

      *offset = off0 * int_cst_value (op1);
      if (integer_zerop (op0))

      expr = fold_build2 (MULT_EXPR, type, op0, op1);
      return fold_convert (orig_type, expr);

    case ARRAY_RANGE_REF:
      step = array_ref_element_size (expr);
      if (!cst_and_fits_in_hwi (step))

      st = int_cst_value (step);
      op1 = TREE_OPERAND (expr, 1);
      op1 = strip_offset_1 (op1, false, false, &off1);
      *offset = off1 * st;
	  && integer_zerop (op1))
	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);

      tmp = component_ref_field_offset (expr);
      field = TREE_OPERAND (expr, 1);
	  && cst_and_fits_in_hwi (tmp)
	  && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field)))
	  HOST_WIDE_INT boffset, abs_off;

	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	  boffset = int_cst_value (DECL_FIELD_BIT_OFFSET (field));
	  abs_off = abs_hwi (boffset) / BITS_PER_UNIT;
	  *offset = off0 + int_cst_value (tmp) + abs_off;

      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, true, true, &off0);

      if (op0 == TREE_OPERAND (expr, 0))

      expr = build_fold_addr_expr (op0);
      return fold_convert (orig_type, expr);

      /* ??? Offset operand?  */
      inside_addr = false;

      if (ptrdiff_tree_p (expr, offset) && maybe_ne (*offset, 0))
	return build_int_cst (orig_type, 0);

      /* Default handling of expressions for which we want to recurse into
	 the first operand.  */
      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, inside_addr, false, &off0);

  if (op0 == TREE_OPERAND (expr, 0)
      && (!op1 || op1 == TREE_OPERAND (expr, 1)))

  expr = copy_node (expr);
  TREE_OPERAND (expr, 0) = op0;
    TREE_OPERAND (expr, 1) = op1;

  /* Inside address, we might strip the top level component references,
     thus changing type of the expression.  Handling of ADDR_EXPR
  expr = fold_convert (orig_type, expr);

/* Strips constant offsets from EXPR and stores them to OFFSET.  */

strip_offset (tree expr, poly_uint64_pod *offset)
  tree core = strip_offset_1 (expr, false, false, &off);
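/* Illustrative note (not part of the original source): conceptually,
   strip_offset (ptr + 16, &off) returns "ptr" with off == 16, and for an
   ARRAY_REF a[i + 3] with 4-byte elements the constant part of the index
   contributes 3 * 4 == 12 to OFFSET while the returned core keeps the
   variable part.  The numbers are only an example of the intended
   decomposition.  */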
/* Returns variant of TYPE that can be used as base for different uses.
   We return unsigned type with the same precision, which avoids problems

generic_type_for (tree type)
  if (POINTER_TYPE_P (type))
    return unsigned_type_for (type);

  if (TYPE_UNSIGNED (type))

  return unsigned_type_for (type);
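/* Illustrative note (not part of the original source): e.g. for a signed
   "int" iv this returns "unsigned int", and for a pointer type it returns
   the unsigned integer type of the same precision, so that arithmetic on
   newly created candidates wraps instead of invoking undefined behavior on
   overflow.  */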
/* Private data for walk_tree.  */

struct walk_tree_data
  struct ivopts_data *idata;

/* Callback function for walk_tree, it records invariants and symbol
   references in *EXPR_P.  DATA is the structure storing result info.  */

find_inv_vars_cb (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
  struct version_info *info;
  struct walk_tree_data *wdata = (struct walk_tree_data *) data;

  if (TREE_CODE (op) != SSA_NAME)

  info = name_info (wdata->idata, op);
  /* Because we expand simple operations when finding IVs, a loop-invariant
     variable that isn't referred to by the original loop could be used now.
     Record such invariant variables here.  */
      struct ivopts_data *idata = wdata->idata;
      basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (op));

      if (!bb || !flow_bb_inside_loop_p (idata->current_loop, bb))
	  tree steptype = TREE_TYPE (op);
	  if (POINTER_TYPE_P (steptype))
	    steptype = sizetype;
	  set_iv (idata, op, op, build_int_cst (steptype, 0), true);
	  record_invariant (idata, op, false);

  if (!info->inv_id || info->has_nonlin_use)

  if (!*wdata->inv_vars)
    *wdata->inv_vars = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*wdata->inv_vars, info->inv_id);

/* Records invariants in *EXPR_P.  INV_VARS is the bitmap to which we should

find_inv_vars (struct ivopts_data *data, tree *expr_p, bitmap *inv_vars)
  struct walk_tree_data wdata;

  wdata.inv_vars = inv_vars;
  walk_tree (expr_p, find_inv_vars_cb, &wdata, NULL);
/* Get entry from invariant expr hash table for INV_EXPR.  A new entry
   will be recorded if it doesn't exist yet.  Given the two exprs below:
     inv_expr + cst1, inv_expr + cst2
   it's hard to decide whether the constant part should be stripped or not.
   We choose not to strip it, based on these facts:
     1) We need to count the ADD cost for the constant part if it's
	stripped, which isn't always trivial where this function is called.
     2) Stripping the constant away may conflict with the subsequent
	loop-invariant hoisting pass.
     3) Not stripping the constant results in more invariant exprs, which
	usually leads to decisions preferring lower register pressure.  */

static iv_inv_expr_ent *
get_loop_invariant_expr (struct ivopts_data *data, tree inv_expr)
  STRIP_NOPS (inv_expr);

  if (poly_int_tree_p (inv_expr)
      || TREE_CODE (inv_expr) == SSA_NAME)

  /* Don't strip constant part away as we used to.  */

  /* Stores EXPR in DATA->inv_expr_tab, return pointer to iv_inv_expr_ent.  */
  struct iv_inv_expr_ent ent;
  ent.expr = inv_expr;
  ent.hash = iterative_hash_expr (inv_expr, 0);
  struct iv_inv_expr_ent **slot = data->inv_expr_tab->find_slot (&ent, INSERT);

      *slot = XNEW (struct iv_inv_expr_ent);
      (*slot)->expr = inv_expr;
      (*slot)->hash = ent.hash;
      (*slot)->id = ++data->max_inv_expr_id;
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  If both BASE and STEP are NULL, we add a pseudocandidate for the
   replacement of the final value of the iv by a direct computation.  */

static struct iv_cand *
add_candidate_1 (struct ivopts_data *data, tree base, tree step, bool important,
		 enum iv_position pos, struct iv_use *use,
		 gimple *incremented_at, struct iv *orig_iv = NULL,
		 bool doloop = false)
  struct iv_cand *cand = NULL;
  tree type, orig_type;

  gcc_assert (base && step);

  /* -fkeep-gc-roots-live means that we have to keep a real pointer
     live, but the ivopts code may replace a real pointer with one
     pointing before or after the memory block that is then adjusted
     into the memory block during the loop.  FIXME: It would likely be
     better to actually force the pointer live and still use ivopts;
     for example, it would be enough to write the pointer into memory
     and keep it there until after the loop.  */
  if (flag_keep_gc_roots_live && POINTER_TYPE_P (TREE_TYPE (base)))

  /* For non-original variables, make sure their values are computed in a type
     that does not invoke undefined behavior on overflows (since in general,
     we cannot prove that these induction variables are non-wrapping).  */
  if (pos != IP_ORIGINAL)
      orig_type = TREE_TYPE (base);
      type = generic_type_for (orig_type);
      if (type != orig_type)
	  base = fold_convert (type, base);
	  step = fold_convert (type, step);

  for (i = 0; i < data->vcands.length (); i++)
      cand = data->vcands[i];

      if (cand->pos != pos)

      if (cand->incremented_at != incremented_at
	  || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	      && cand->ainc_use != use))

      if (operand_equal_p (base, cand->iv->base, 0)
	  && operand_equal_p (step, cand->iv->step, 0)
	  && (TYPE_PRECISION (TREE_TYPE (base))
	      == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))

  if (i == data->vcands.length ())
      cand = XCNEW (struct iv_cand);
      cand->iv = alloc_iv (data, base, step);
      if (pos != IP_ORIGINAL)
	  cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "doloop");
	  cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
	  cand->var_after = cand->var_before;
      cand->important = important;
      cand->incremented_at = incremented_at;
      cand->doloop_p = doloop;
      data->vcands.safe_push (cand);

      if (!poly_int_tree_p (step))
	  find_inv_vars (data, &step, &cand->inv_vars);

	  iv_inv_expr_ent *inv_expr = get_loop_invariant_expr (data, step);
	  /* Share bitmap between inv_vars and inv_exprs for cand.  */
	  if (inv_expr != NULL)
	      cand->inv_exprs = cand->inv_vars;
	      cand->inv_vars = NULL;
	      if (cand->inv_exprs)
		bitmap_clear (cand->inv_exprs);
		cand->inv_exprs = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (cand->inv_exprs, inv_expr->id);

      if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	cand->ainc_use = use;
	cand->ainc_use = NULL;

      cand->orig_iv = orig_iv;
      if (dump_file && (dump_flags & TDF_DETAILS))
	dump_cand (dump_file, cand);

  cand->important |= important;
  cand->doloop_p |= doloop;

  /* Relate candidate to the group for which it is added.  */
    bitmap_set_bit (data->vgroups[use->group_id]->related_cands, i);
/* Returns true if incrementing the induction variable at the end of the LOOP

   The purpose is to avoid splitting the latch edge with a biv increment, thus
   creating a jump, possibly confusing other optimization passes and leaving
   less freedom to the scheduler.  So we allow IP_END only if IP_NORMAL is not
   available (so we do not have a better alternative), or if the latch edge
   is already nonempty.  */

allow_ip_end_pos_p (class loop *loop)
  if (!ip_normal_pos (loop))

  if (!empty_block_p (ip_end_pos (loop)))
/* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
   Important field is set to IMPORTANT.  */

add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
			bool important, struct iv_use *use)
  basic_block use_bb = gimple_bb (use->stmt);
  machine_mode mem_mode;
  unsigned HOST_WIDE_INT cstepi;

  /* If we insert the increment in any position other than the standard
     ones, we must ensure that it is incremented once per iteration.
     It must not be in an inner nested loop, or one side of an if
  if (use_bb->loop_father != data->current_loop
      || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
      || stmt_can_throw_internal (cfun, use->stmt)
      || !cst_and_fits_in_hwi (step))

  cstepi = int_cst_value (step);

  mem_mode = TYPE_MODE (use->mem_type);
  if (((USE_LOAD_PRE_INCREMENT (mem_mode)
	|| USE_STORE_PRE_INCREMENT (mem_mode))
       && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
      || ((USE_LOAD_PRE_DECREMENT (mem_mode)
	   || USE_STORE_PRE_DECREMENT (mem_mode))
	  && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
      enum tree_code code = MINUS_EXPR;
      tree new_step = step;

      if (POINTER_TYPE_P (TREE_TYPE (base)))
	  new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  code = POINTER_PLUS_EXPR;
	new_step = fold_convert (TREE_TYPE (base), new_step);
      new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
      add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,

  if (((USE_LOAD_POST_INCREMENT (mem_mode)
	|| USE_STORE_POST_INCREMENT (mem_mode))
       && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
      || ((USE_LOAD_POST_DECREMENT (mem_mode)
	   || USE_STORE_POST_DECREMENT (mem_mode))
	  && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
      add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
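/* Illustrative note (not part of the original source): on a target with
   post-increment addressing, a 4-byte memory use whose address iv has step 4
   gets an IP_AFTER_USE candidate above, so the increment can later be folded
   into the access itself (the classical "*p++" form).  Which of the pre/post
   increment/decrement variants are offered is entirely target dependent.  */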
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  The candidate computation is scheduled before exit condition and at

add_candidate (struct ivopts_data *data, tree base, tree step, bool important,
	       struct iv_use *use, struct iv *orig_iv = NULL,
	       bool doloop = false)
  if (ip_normal_pos (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_NORMAL, use, NULL, orig_iv,
  /* Exclude doloop candidate here since it requires decrement then comparison
     and jump, the IP_END position doesn't match.  */
  if (!doloop && ip_end_pos (data->current_loop)
      && allow_ip_end_pos_p (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_END, use, NULL, orig_iv);
/* Adds standard iv candidates.  */

add_standard_iv_candidates (struct ivopts_data *data)
  add_candidate (data, integer_zero_node, integer_one_node, true, NULL);

  /* The same for a double-integer type if it is still fast enough.  */
      (long_integer_type_node) > TYPE_PRECISION (integer_type_node)
      && TYPE_PRECISION (long_integer_type_node) <= BITS_PER_WORD)
    add_candidate (data, build_int_cst (long_integer_type_node, 0),
		   build_int_cst (long_integer_type_node, 1), true, NULL);

  /* The same for a double-integer type if it is still fast enough.  */
      (long_long_integer_type_node) > TYPE_PRECISION (long_integer_type_node)
      && TYPE_PRECISION (long_long_integer_type_node) <= BITS_PER_WORD)
    add_candidate (data, build_int_cst (long_long_integer_type_node, 0),
		   build_int_cst (long_long_integer_type_node, 1), true, NULL);
/* Adds candidates based on the old induction variable IV.  */

add_iv_candidate_for_biv (struct ivopts_data *data, struct iv *iv)
  struct iv_cand *cand;

  /* Check if this biv is used in address type use.  */
  if (iv->no_overflow && iv->have_address_use
      && INTEGRAL_TYPE_P (TREE_TYPE (iv->base))
      && TYPE_PRECISION (TREE_TYPE (iv->base)) < TYPE_PRECISION (sizetype))
      tree base = fold_convert (sizetype, iv->base);
      tree step = fold_convert (sizetype, iv->step);

      /* Add iv cand of same precision as index part in TARGET_MEM_REF.  */
      add_candidate (data, base, step, true, NULL, iv);
      /* Add iv cand of the original type only if it has nonlinear use.  */
	add_candidate (data, iv->base, iv->step, true, NULL);
    add_candidate (data, iv->base, iv->step, true, NULL);

  /* The same, but with initial value zero.  */
  if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
    add_candidate (data, size_int (0), iv->step, true, NULL);
    add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
		   iv->step, true, NULL);

  phi = SSA_NAME_DEF_STMT (iv->ssa_name);
  if (gimple_code (phi) == GIMPLE_PHI)
      /* Additionally record the possibility of leaving the original iv
      def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
      /* Don't add candidate if it's from another PHI node because
	 it's an affine iv appearing in the form of PEELED_CHREC.  */
      phi = SSA_NAME_DEF_STMT (def);
      if (gimple_code (phi) != GIMPLE_PHI)
	  cand = add_candidate_1 (data,
				  iv->base, iv->step, true, IP_ORIGINAL, NULL,
				  SSA_NAME_DEF_STMT (def));
	      cand->var_before = iv->ssa_name;
	      cand->var_after = def;
	gcc_assert (gimple_bb (phi) == data->current_loop->header);
/* Adds candidates based on the old induction variables.  */

add_iv_candidate_for_bivs (struct ivopts_data *data)
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
      iv = ver_info (data, i)->iv;
      if (iv && iv->biv_p && !integer_zerop (iv->step))
	add_iv_candidate_for_biv (data, iv);
/* Record common candidate {BASE, STEP} derived from USE in hashtable.  */

record_common_cand (struct ivopts_data *data, tree base,
		    tree step, struct iv_use *use)
  class iv_common_cand ent;
  class iv_common_cand **slot;

  ent.hash = iterative_hash_expr (base, 0);
  ent.hash = iterative_hash_expr (step, ent.hash);

  slot = data->iv_common_cand_tab->find_slot (&ent, INSERT);
      *slot = new iv_common_cand ();
      (*slot)->base = base;
      (*slot)->step = step;
      (*slot)->uses.create (8);
      (*slot)->hash = ent.hash;
      data->iv_common_cands.safe_push ((*slot));

  gcc_assert (use != NULL);
  (*slot)->uses.safe_push (use);
/* Comparison function used to sort common candidates.  */

common_cand_cmp (const void *p1, const void *p2)
  const class iv_common_cand *const *const ccand1
    = (const class iv_common_cand *const *) p1;
  const class iv_common_cand *const *const ccand2
    = (const class iv_common_cand *const *) p2;

  n1 = (*ccand1)->uses.length ();
  n2 = (*ccand2)->uses.length ();
/* Adds IV candidates based on the common candidates recorded.  */

add_iv_candidate_derived_from_uses (struct ivopts_data *data)
  struct iv_cand *cand_1, *cand_2;

  data->iv_common_cands.qsort (common_cand_cmp);
  for (i = 0; i < data->iv_common_cands.length (); i++)
      class iv_common_cand *ptr = data->iv_common_cands[i];

      /* Only add IV candidate if it's derived from multiple uses.  */
      if (ptr->uses.length () <= 1)

      if (ip_normal_pos (data->current_loop))
	cand_1 = add_candidate_1 (data, ptr->base, ptr->step,
				  false, IP_NORMAL, NULL, NULL);

      if (ip_end_pos (data->current_loop)
	  && allow_ip_end_pos_p (data->current_loop))
	cand_2 = add_candidate_1 (data, ptr->base, ptr->step,
				  false, IP_END, NULL, NULL);

      /* Bind deriving uses and the new candidates.  */
      for (j = 0; j < ptr->uses.length (); j++)
	  struct iv_group *group = data->vgroups[ptr->uses[j]->group_id];
	    bitmap_set_bit (group->related_cands, cand_1->id);
	    bitmap_set_bit (group->related_cands, cand_2->id);

  /* Release data since it is useless from this point.  */
  data->iv_common_cand_tab->empty ();
  data->iv_common_cands.truncate (0);
/* Adds candidates based on the value of USE's iv.  */

add_iv_candidate_for_use (struct ivopts_data *data, struct iv_use *use)
  struct iv *iv = use->iv;
  tree basetype = TREE_TYPE (iv->base);

  /* Don't add a candidate for an iv_use whose type is neither integer nor
     pointer, or whose type lacks mode precision; instead, add a candidate
     for the corresponding scev in unsigned type with the same precision.
     See PR93674 for more info.  */
  if ((TREE_CODE (basetype) != INTEGER_TYPE && !POINTER_TYPE_P (basetype))
      || !type_has_mode_precision_p (basetype))
      basetype = lang_hooks.types.type_for_mode (TYPE_MODE (basetype),
						 TYPE_UNSIGNED (basetype));
      add_candidate (data, fold_convert (basetype, iv->base),
		     fold_convert (basetype, iv->step), false, NULL);

  add_candidate (data, iv->base, iv->step, false, use);

  /* Record common candidate for use in case it can be shared by others.  */
  record_common_cand (data, iv->base, iv->step, use);

  /* Record common candidate with initial value zero.  */
  basetype = TREE_TYPE (iv->base);
  if (POINTER_TYPE_P (basetype))
    basetype = sizetype;
  record_common_cand (data, build_int_cst (basetype, 0), iv->step, use);

  /* Compare the cost of an address with an unscaled index with the cost of
     an address with a scaled index and add candidate if useful.  */
      && poly_int_tree_p (iv->step, &step)
      && address_p (use->type))
      poly_int64 new_step;
      unsigned int fact = preferred_mem_scale_factor
			    TYPE_MODE (use->mem_type),
			    optimize_loop_for_speed_p (data->current_loop));

	  && multiple_p (step, fact, &new_step))
	add_candidate (data, size_int (0),
		       wide_int_to_tree (sizetype, new_step),

  /* Record common candidate with constant offset stripped in base.
     Like the use itself, we also add candidate directly for it.  */
  base = strip_offset (iv->base, &offset);
  if (maybe_ne (offset, 0U) || base != iv->base)
      record_common_cand (data, base, iv->step, use);
      add_candidate (data, base, iv->step, false, use);

  /* Record common candidate with base_object removed in base.  */
  if (iv->base_object != NULL && TREE_CODE (base) == POINTER_PLUS_EXPR)
      tree step = iv->step;

      base = TREE_OPERAND (base, 1);
      step = fold_convert (sizetype, step);
      record_common_cand (data, base, step, use);
      /* Also record common candidate with offset stripped.  */
      base = strip_offset (base, &offset);
      if (maybe_ne (offset, 0U))
	record_common_cand (data, base, step, use);

  /* At last, add auto-incremental candidates.  Make such variables
     important since other iv uses with same base object may be based
  if (use != NULL && address_p (use->type))
    add_autoinc_candidates (data, iv->base, iv->step, true, use);
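/* Illustrative note (not part of the original source): for an address use
   whose iv is {p + 16, +, 4}, the code above typically adds/records the
   candidate {p + 16, +, 4} itself, the offset-stripped common candidate
   {p, +, 4}, the zero-based common candidate {0, +, 4}, and, when
   profitable, a scaled variant and auto-increment variants.  Which of these
   survive is decided later by the cost model.  */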
/* Adds candidates based on the uses.  */

add_iv_candidate_for_groups (struct ivopts_data *data)
  /* Only add candidate for the first use in group.  */
  for (i = 0; i < data->vgroups.length (); i++)
      struct iv_group *group = data->vgroups[i];

      gcc_assert (group->vuses[0] != NULL);
      add_iv_candidate_for_use (data, group->vuses[0]);
  add_iv_candidate_derived_from_uses (data);
/* Record important candidates and add them to related_cands bitmaps.  */

record_important_candidates (struct ivopts_data *data)
  struct iv_group *group;

  for (i = 0; i < data->vcands.length (); i++)
      struct iv_cand *cand = data->vcands[i];

      if (cand->important)
	bitmap_set_bit (data->important_candidates, i);

  data->consider_all_candidates = (data->vcands.length ()
				   <= CONSIDER_ALL_CANDIDATES_BOUND);

  /* Add important candidates to groups' related_cands bitmaps.  */
  for (i = 0; i < data->vgroups.length (); i++)
      group = data->vgroups[i];
      bitmap_ior_into (group->related_cands, data->important_candidates);
/* Allocates the data structure mapping the (use, candidate) pairs to costs.
   If consider_all_candidates is true, we use a two-dimensional array, otherwise
   we allocate a simple list to every use.  */

alloc_use_cost_map (struct ivopts_data *data)
  unsigned i, size, s;

  for (i = 0; i < data->vgroups.length (); i++)
      struct iv_group *group = data->vgroups[i];

      if (data->consider_all_candidates)
	size = data->vcands.length ();
	  s = bitmap_count_bits (group->related_cands);

	  /* Round up to a power of two, so that taking the modulo by it
	     is fast.  */
	  size = s ? (1 << ceil_log2 (s)) : 1;

      group->n_map_members = size;
      group->cost_map = XCNEWVEC (class cost_pair, size);
/* Sets cost of (GROUP, CAND) pair to COST and records that it depends
   on invariants INV_VARS and that the value used in expressing it is
   VALUE, and in case of iv elimination the comparison operator is COMP.  */

set_group_iv_cost (struct ivopts_data *data,
		   struct iv_group *group, struct iv_cand *cand,
		   comp_cost cost, bitmap inv_vars, tree value,
		   enum tree_code comp, bitmap inv_exprs)
  if (cost.infinite_cost_p ())
      BITMAP_FREE (inv_vars);
      BITMAP_FREE (inv_exprs);

  if (data->consider_all_candidates)
      group->cost_map[cand->id].cand = cand;
      group->cost_map[cand->id].cost = cost;
      group->cost_map[cand->id].inv_vars = inv_vars;
      group->cost_map[cand->id].inv_exprs = inv_exprs;
      group->cost_map[cand->id].value = value;
      group->cost_map[cand->id].comp = comp;

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (group->n_map_members - 1);
  for (i = s; i < group->n_map_members; i++)
    if (!group->cost_map[i].cand)
  for (i = 0; i < s; i++)
    if (!group->cost_map[i].cand)

  group->cost_map[i].cand = cand;
  group->cost_map[i].cost = cost;
  group->cost_map[i].inv_vars = inv_vars;
  group->cost_map[i].inv_exprs = inv_exprs;
  group->cost_map[i].value = value;
  group->cost_map[i].comp = comp;

/* Gets cost of (GROUP, CAND) pair.  */

static class cost_pair *
get_group_iv_cost (struct ivopts_data *data, struct iv_group *group,
		   struct iv_cand *cand)
  class cost_pair *ret;

  if (data->consider_all_candidates)
      ret = group->cost_map + cand->id;

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (group->n_map_members - 1);
  for (i = s; i < group->n_map_members; i++)
    if (group->cost_map[i].cand == cand)
      return group->cost_map + i;
    else if (group->cost_map[i].cand == NULL)
  for (i = 0; i < s; i++)
    if (group->cost_map[i].cand == cand)
      return group->cost_map + i;
    else if (group->cost_map[i].cand == NULL)
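/* Illustrative note (not part of the original source): because
   n_map_members is rounded up to a power of two, "cand->id &
   (n_map_members - 1)" is the usual mask trick for "cand->id %
   n_map_members".  E.g. with 8 map members, a candidate with id 13 hashes
   to slot 13 & 7 == 5, and both the setter and the getter then probe
   linearly (5, 6, 7, then 0, 1, ...) until they find the candidate or an
   empty slot.  */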
/* Produce DECL_RTL for object OBJ so it looks like it is stored in memory.  */

produce_memory_decl_rtl (tree obj, int *regno)
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
  machine_mode address_mode = targetm.addr_space.address_mode (as);

  if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
      const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
      x = gen_rtx_SYMBOL_REF (address_mode, name);
      SET_SYMBOL_REF_DECL (x, obj);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
      targetm.encode_section_info (obj, x, true);

      x = gen_raw_REG (address_mode, (*regno)++);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
/* Prepares decl_rtl for variables referred to in *EXPR_P.  Callback for
   walk_tree.  DATA contains the actual fake register number.  */

prepare_decl_rtl (tree *expr_p, int *ws, void *data)
  tree obj = NULL_TREE;
  int *regno = (int *) data;

  switch (TREE_CODE (*expr_p))
      for (expr_p = &TREE_OPERAND (*expr_p, 0);
	   handled_component_p (*expr_p);
	   expr_p = &TREE_OPERAND (*expr_p, 0))
      if (DECL_P (obj) && HAS_RTL_P (obj) && !DECL_RTL_SET_P (obj))
	x = produce_memory_decl_rtl (obj, regno);

      obj = SSA_NAME_VAR (*expr_p);
      /* Defer handling of anonymous SSA_NAMEs to the expander.  */
      if (!DECL_RTL_SET_P (obj))
	x = gen_raw_REG (DECL_MODE (obj), (*regno)++);

      if (DECL_RTL_SET_P (obj))

      if (DECL_MODE (obj) == BLKmode)
	x = produce_memory_decl_rtl (obj, regno);
	x = gen_raw_REG (DECL_MODE (obj), (*regno)++);

  decl_rtl_to_reset.safe_push (obj);
  SET_DECL_RTL (obj, x);
/* Predict whether the given loop will be transformed in the RTL
   doloop_optimize pass.  Attempt to duplicate some doloop_optimize checks.
   This is only for target independent checks, see targetm.predict_doloop_p
   for the target dependent ones.

   Note that according to some initial investigation, checks such as the
   costly niter check and invalid stmt scanning don't gain much in general
   cases, so keep this as simple as possible for now.

   Some RTL-specific checks seem impossible to perform on gimple; if any new
   checks or easy checks _are_ missing here, please add them.  */

generic_predict_doloop_p (struct ivopts_data *data)
  class loop *loop = data->current_loop;

  /* Call target hook for target dependent checks.  */
  if (!targetm.predict_doloop_p (loop))
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Predict doloop failure due to"
			    " target specific checks.\n");

  /* Similar to doloop_optimize, check iteration description to know it's
     suitable or not.  Keep it as simple as possible, feel free to extend it
     if you find any multiple exits cases matter.  */
  edge exit = single_dom_exit (loop);
  class tree_niter_desc *niter_desc;
  if (!exit || !(niter_desc = niter_for_exit (data, exit)))
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Predict doloop failure due to"
			    " unexpected niters.\n");

  /* Similar to doloop_optimize, check whether the iteration count is too
     small to be profitable.  */
  HOST_WIDE_INT est_niter = get_estimated_loop_iterations_int (loop);
  if (est_niter == -1)
    est_niter = get_likely_max_loop_iterations_int (loop);
  if (est_niter >= 0 && est_niter < 3)
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Predict doloop failure due to"
		 " too few iterations (%u).\n",
		 (unsigned int) est_niter);
/* Determines cost of the computation of EXPR.  */

computation_cost (tree expr, bool speed)
  tree type = TREE_TYPE (expr);
  /* Avoid using hard regs in ways which may be unsupported.  */
  int regno = LAST_VIRTUAL_REGISTER + 1;
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  enum node_frequency real_frequency = node->frequency;

  node->frequency = NODE_FREQUENCY_NORMAL;
  crtl->maybe_hot_insn_p = speed;
  walk_tree (&expr, prepare_decl_rtl, &regno, NULL);

  rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);

  default_rtl_profile ();
  node->frequency = real_frequency;

  cost = seq_cost (seq, speed);
    cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
			  TYPE_ADDR_SPACE (type), speed);
  else if (!REG_P (rslt))
    cost += set_src_cost (rslt, TYPE_MODE (type), speed);
/* Returns variable containing the value of candidate CAND at statement AT.  */

var_at_stmt (class loop *loop, struct iv_cand *cand, gimple *stmt)
  if (stmt_after_increment (loop, cand, stmt))
    return cand->var_after;
  return cand->var_before;
/* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
   same precision that is at least as wide as the precision of TYPE, stores
   BA to A and BB to B, and returns the type of BA.  Otherwise, returns the

determine_common_wider_type (tree *a, tree *b)
  tree wider_type = NULL;
  tree atype = TREE_TYPE (*a);

  if (CONVERT_EXPR_P (*a))
      suba = TREE_OPERAND (*a, 0);
      wider_type = TREE_TYPE (suba);
      if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))

  if (CONVERT_EXPR_P (*b))
      subb = TREE_OPERAND (*b, 0);
      if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in two parts in a
   decomposed form.  The invariant part is stored in AFF_INV; while variant
   part in AFF_VAR.  Store ratio of CAND.step over USE.step in PRAT if it's
   non-null.  Returns false if USE cannot be expressed using CAND.  */

get_computation_aff_1 (class loop *loop, gimple *at, struct iv_use *use,
		       struct iv_cand *cand, class aff_tree *aff_inv,
		       class aff_tree *aff_var, widest_int *prat = NULL)
  tree ubase = use->iv->base, ustep = use->iv->step;
  tree cbase = cand->iv->base, cstep = cand->iv->step;
  tree common_type, uutype, var, cstep_common;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);

  /* We must have a precision to express the values of use.  */
  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))

  var = var_at_stmt (loop, cand, at);
  uutype = unsigned_type_for (utype);

  /* If the conversion is not noop, perform it.  */
  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
      if (cand->orig_iv != NULL && CONVERT_EXPR_P (cbase)
	  && (CONVERT_EXPR_P (cstep) || poly_int_tree_p (cstep)))
	  tree inner_base, inner_step, inner_type;
	  inner_base = TREE_OPERAND (cbase, 0);
	  if (CONVERT_EXPR_P (cstep))
	    inner_step = TREE_OPERAND (cstep, 0);

	  inner_type = TREE_TYPE (inner_base);
	  /* If candidate is added from a biv whose type is smaller than
	     ctype, we know both candidate and the biv won't overflow.
	     In this case, it's safe to skip the conversion in candidate.
	     As an example, (unsigned short)((unsigned long)A) equals to
	     (unsigned short)A, if A has a type no larger than short.  */
	  if (TYPE_PRECISION (inner_type) <= TYPE_PRECISION (uutype))

      cbase = fold_convert (uutype, cbase);
      cstep = fold_convert (uutype, cstep);
      var = fold_convert (uutype, var);

  /* Ratio is 1 when computing the value of biv cand by itself.
     We can't rely on constant_multiple_of in this case because the
     use is created after the original biv is selected.  The call
     could fail because of inconsistent fold behavior.  See PR68021
     for more information.  */
  if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (use->iv->ssa_name == cand->var_after);
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
  else if (!constant_multiple_of (ustep, cstep, &rat))

  /* In case both UBASE and CBASE are shortened to UUTYPE from some common
     type, we achieve better folding by computing their difference in this
     wider type, and cast the result to UUTYPE.  We do not need to worry about
     overflows, as all the arithmetics will in the end be performed in UUTYPE
  common_type = determine_common_wider_type (&ubase, &cbase);

  /* use = ubase - ratio * cbase + ratio * var.  */
  tree_to_aff_combination (ubase, common_type, aff_inv);
  tree_to_aff_combination (cbase, common_type, &aff_cbase);
  tree_to_aff_combination (var, uutype, aff_var);

  /* We need to shift the value if we are after the increment.  */
  if (stmt_after_increment (loop, cand, at))
      if (common_type != uutype)
	cstep_common = fold_convert (common_type, cstep);
	cstep_common = cstep;

      tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
      aff_combination_add (&aff_cbase, &cstep_aff);

  aff_combination_scale (&aff_cbase, -rat);
  aff_combination_add (aff_inv, &aff_cbase);
  if (common_type != uutype)
    aff_combination_convert (aff_inv, uutype);

  aff_combination_scale (aff_var, rat)
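/* Illustrative note (not part of the original source): the decomposition
   "use = ubase - ratio * cbase + ratio * var" can be read on a small case:
   with a use iv {0, +, 8} and a candidate iv {0, +, 4}, ratio == 2, so the
   use value in iteration i is 0 - 2 * 0 + 2 * var, i.e. twice the candidate
   value 4 * i, which is indeed 8 * i.  The invariant part lands in AFF_INV
   and the "ratio * var" part in AFF_VAR.  */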
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in a decomposed
   form into AFF.  Returns false if USE cannot be expressed using CAND.  */

get_computation_aff (class loop *loop, gimple *at, struct iv_use *use,
		     struct iv_cand *cand, class aff_tree *aff)
  if (!get_computation_aff_1 (loop, at, use, cand, aff, &aff_var))

  aff_combination_add (aff, &aff_var);
/* Return the type of USE.  */

get_use_type (struct iv_use *use)
  tree base_type = TREE_TYPE (use->iv->base);

  if (use->type == USE_REF_ADDRESS)
      /* The base_type may be a void pointer.  Create a pointer type based on
	 the mem_ref instead.  */
      type = build_pointer_type (TREE_TYPE (*use->op_p));
      gcc_assert (TYPE_ADDR_SPACE (TREE_TYPE (type))
		  == TYPE_ADDR_SPACE (TREE_TYPE (base_type)));
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The computation is unshared.  */

get_computation_at (class loop *loop, gimple *at,
		    struct iv_use *use, struct iv_cand *cand)
  tree type = get_use_type (use);

  if (!get_computation_aff (loop, at, use, cand, &aff))
  unshare_aff_combination (&aff);
  return fold_convert (type, aff_combination_to_tree (&aff));
/* Like get_computation_at, but try harder, even if the computation
   is more expensive.  Intended for debug stmts.  */

get_debug_computation_at (class loop *loop, gimple *at,
			  struct iv_use *use, struct iv_cand *cand)
  if (tree ret = get_computation_at (loop, at, use, cand))

  tree ubase = use->iv->base, ustep = use->iv->step;
  tree cbase = cand->iv->base, cstep = cand->iv->step;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);

  /* We must have a precision to express the values of use.  */
  if (TYPE_PRECISION (utype) >= TYPE_PRECISION (ctype))

  /* Try to handle the case that get_computation_at doesn't,
     use = ubase + (var - cbase) / ratio.  */
  if (!constant_multiple_of (cstep, fold_convert (TREE_TYPE (cstep), ustep),

  if (wi::neg_p (rat))
      if (TYPE_UNSIGNED (ctype))
      rat = wi::neg (rat);

  /* If both IVs can wrap around and CAND doesn't have a power of two step,
     it is unsafe.  Consider uint16_t CAND with step 9, when wrapping around,
     the values will be ... 0xfff0, 0xfff9, 2, 11 ... and when use is say
     uint8_t with step 3, those values divided by 3 cast to uint8_t will be
     ... 0x50, 0x53, 0, 3 ... rather than expected 0x50, 0x53, 0x56, 0x59.  */
  if (!use->iv->no_overflow
      && !cand->iv->no_overflow
      && !integer_pow2p (cstep))

  int bits = wi::exact_log2 (rat);
    bits = wi::floor_log2 (rat) + 1;
  if (!cand->iv->no_overflow
      && TYPE_PRECISION (utype) + bits > TYPE_PRECISION (ctype))

  var = var_at_stmt (loop, cand, at);

  if (POINTER_TYPE_P (ctype))
      ctype = unsigned_type_for (ctype);
      cbase = fold_convert (ctype, cbase);
      cstep = fold_convert (ctype, cstep);
      var = fold_convert (ctype, var);

  if (stmt_after_increment (loop, cand, at))
    var = fold_build2 (MINUS_EXPR, TREE_TYPE (var), var,
		       unshare_expr (cstep));

  var = fold_build2 (MINUS_EXPR, TREE_TYPE (var), var, cbase);
  var = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (var), var,
		     wide_int_to_tree (TREE_TYPE (var), rat));
  if (POINTER_TYPE_P (utype))
      var = fold_convert (sizetype, var);
	var = fold_build1 (NEGATE_EXPR, sizetype, var);
      var = fold_build2 (POINTER_PLUS_EXPR, utype, ubase, var);

      var = fold_convert (utype, var);
      var = fold_build2 (neg_p ? MINUS_EXPR : PLUS_EXPR, utype,
/* Adjust the cost COST for being in loop setup rather than loop body.
   If we're optimizing for space, the loop setup overhead is constant;
   if we're optimizing for speed, amortize it over the per-iteration cost.
   If ROUND_UP_P is true, the result is rounded up rather than truncated
   toward zero when optimizing for speed.  */

adjust_setup_cost (struct ivopts_data *data, int64_t cost,
		   bool round_up_p = false)
  else if (optimize_loop_for_speed_p (data->current_loop))
      int64_t niters = (int64_t) avg_loop_niter (data->current_loop);
      return (cost + (round_up_p ? niters - 1 : 0)) / niters;
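/* Illustrative note (not part of the original source): when optimizing for
   speed, a setup cost of 10 in a loop whose average iteration count is 4 is
   charged as 10 / 4 == 2 per iteration, or (10 + 3) / 4 == 3 when
   ROUND_UP_P asks for rounding up; when optimizing for size the setup cost
   is treated as a constant overhead, as the function comment states.  */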
/* Calculate the SPEED or size cost of shiftadd EXPR in MODE.  MULT is the
   EXPR operand holding the shift.  COST0 and COST1 are the costs for
   calculating the operands of EXPR.  Returns true if successful, and returns
   the cost in COST.  */

get_shiftadd_cost (tree expr, scalar_int_mode mode, comp_cost cost0,
		   comp_cost cost1, tree mult, bool speed, comp_cost *cost)
  tree op1 = TREE_OPERAND (expr, 1);
  tree cst = TREE_OPERAND (mult, 1);
  tree multop = TREE_OPERAND (mult, 0);
  int m = exact_log2 (int_cst_value (cst));
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int as_cost, sa_cost;

  if (!(m >= 0 && m < maxm))

  mult_in_op1 = operand_equal_p (op1, mult, 0);

  as_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);

  /* If the target has a cheap shift-and-add or shift-and-sub instruction,
     use that in preference to a shift insn followed by an add insn.  */
  sa_cost = (TREE_CODE (expr) != MINUS_EXPR
	     ? shiftadd_cost (speed, mode, m)
	       ? shiftsub1_cost (speed, mode, m)
	       : shiftsub0_cost (speed, mode, m)));

  res = comp_cost (MIN (as_cost, sa_cost), 0);
  res += (mult_in_op1 ? cost0 : cost1);

  STRIP_NOPS (multop);
  if (!is_gimple_val (multop))
    res += force_expr_to_var_cost (multop, speed);
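/* Illustrative note (not part of the original source): for an expression
   like a + b * 8, MULT is "b * 8", so m == 3, and the code compares the cost
   of a shift by 3 followed by an add (as_cost) against the target's combined
   shift-and-add instruction (sa_cost), keeping the cheaper of the two plus
   the cost of the remaining operand.  */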
/* Estimates cost of forcing expression EXPR into a variable.  */

force_expr_to_var_cost (tree expr, bool speed)
  static bool costs_initialized = false;
  static unsigned integer_cost [2];
  static unsigned symbol_cost [2];
  static unsigned address_cost [2];

  comp_cost cost0, cost1, cost;
  scalar_int_mode int_mode;

  if (!costs_initialized)
      tree type = build_pointer_type (integer_type_node);

      var = create_tmp_var_raw (integer_type_node, "test_var");
      TREE_STATIC (var) = 1;
      x = produce_memory_decl_rtl (var, NULL);
      SET_DECL_RTL (var, x);

      addr = build1 (ADDR_EXPR, type, var);

      for (i = 0; i < 2; i++)
	  integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
	  symbol_cost[i] = computation_cost (addr, i) + 1;
	    = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	      fprintf (dump_file, "force_expr_to_var_cost %s costs:\n",
		       i ? "speed" : "size");
	      fprintf (dump_file, "  integer %d\n", (int) integer_cost[i]);
	      fprintf (dump_file, "  symbol %d\n", (int) symbol_cost[i]);
	      fprintf (dump_file, "  address %d\n", (int) address_cost[i]);
	      fprintf (dump_file, "  other %d\n", (int) target_spill_cost[i]);
	      fprintf (dump_file, "\n");

      costs_initialized = true;

  if (SSA_VAR_P (expr))

  if (is_gimple_min_invariant (expr))
      if (poly_int_tree_p (expr))
	return comp_cost (integer_cost[speed], 0);

      if (TREE_CODE (expr) == ADDR_EXPR)
	  tree obj = TREE_OPERAND (expr, 0);
	      || TREE_CODE (obj) == PARM_DECL
	      || TREE_CODE (obj) == RESULT_DECL)
	    return comp_cost (symbol_cost[speed], 0);

      return comp_cost (address_cost[speed], 0);

  switch (TREE_CODE (expr))
    case POINTER_PLUS_EXPR:
    case TRUNC_DIV_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      op0 = TREE_OPERAND (expr, 0);

      /* See add_iv_candidate_for_doloop, for doloop may_be_zero case, we
	 introduce COND_EXPR for IV base, need to support better cost estimation
	 for this COND_EXPR and tcc_comparison.  */
      op0 = TREE_OPERAND (expr, 1);
      op1 = TREE_OPERAND (expr, 2);

    case UNORDERED_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      /* Just an arbitrary value, FIXME.  */
      return comp_cost (target_spill_cost[speed], 0);

  if (op0 == NULL_TREE
      || TREE_CODE (op0) == SSA_NAME || CONSTANT_CLASS_P (op0))
    cost0 = force_expr_to_var_cost (op0, speed);

  if (op1 == NULL_TREE
      || TREE_CODE (op1) == SSA_NAME || CONSTANT_CLASS_P (op1))
    cost1 = force_expr_to_var_cost (op1, speed);

  mode = TYPE_MODE (TREE_TYPE (expr));
  switch (TREE_CODE (expr))
    case POINTER_PLUS_EXPR:
      cost = comp_cost (add_cost (speed, mode), 0);
      if (TREE_CODE (expr) != NEGATE_EXPR)
	  tree mult = NULL_TREE;

	  if (TREE_CODE (op1) == MULT_EXPR)
	  else if (TREE_CODE (op0) == MULT_EXPR)

	  if (mult != NULL_TREE
	      && is_a <scalar_int_mode> (mode, &int_mode)
	      && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
	      && get_shiftadd_cost (expr, int_mode, cost0, cost1, mult,

      tree inner_mode, outer_mode;
      outer_mode = TREE_TYPE (expr);
      inner_mode = TREE_TYPE (op0);
      cost = comp_cost (convert_cost (TYPE_MODE (outer_mode),
				      TYPE_MODE (inner_mode), speed), 0);

      if (cst_and_fits_in_hwi (op0))
	cost = comp_cost (mult_by_coeff_cost (int_cst_value (op0),
      else if (cst_and_fits_in_hwi (op1))
	cost = comp_cost (mult_by_coeff_cost (int_cst_value (op1),
	return comp_cost (target_spill_cost [speed], 0);

    case TRUNC_DIV_EXPR:
      /* Division by power of two is usually cheap, so we allow it.  Forbid
      if (integer_pow2p (TREE_OPERAND (expr, 1)))
	cost = comp_cost (add_cost (speed, mode), 0);
	cost = comp_cost (target_spill_cost[speed], 0);

      cost = comp_cost (add_cost (speed, mode), 0);

      op0 = TREE_OPERAND (expr, 0);
      if (op0 == NULL_TREE || TREE_CODE (op0) == SSA_NAME
	  || CONSTANT_CLASS_P (op0))
      cost = force_expr_to_var_cost (op0, speed);

    case UNORDERED_EXPR:
      /* Simply use add cost for now, FIXME if there is some more accurate cost
      cost = comp_cost (add_cost (speed, mode), 0);
/* Estimates cost of forcing EXPR into a variable.  INV_VARS is a set of the
   invariants the computation depends on.  */

force_var_cost (struct ivopts_data *data, tree expr, bitmap *inv_vars)
  find_inv_vars (data, &expr, inv_vars);
  return force_expr_to_var_cost (expr, data->speed);
4531 AINC_STEP is step size of the address IV. AINC_OFFSET is offset of the
4532 address expression. The address expression has ADDR_MODE in addr space
4533     AS.  The memory access has MEM_MODE.  SPEED means we are optimizing for
         speed or size.  */
4538    AINC_PRE_INC,	/* Pre increment.  */
4539    AINC_PRE_DEC,	/* Pre decrement.  */
4540    AINC_POST_INC,	/* Post increment.  */
4541    AINC_POST_DEC,	/* Post decrement.  */
4542    AINC_NONE		/* Also the number of auto increment types.  */
4545  struct ainc_cost_data
4547    int64_t costs[AINC_NONE];
4551  get_address_cost_ainc (poly_int64 ainc_step, poly_int64 ainc_offset,
4552			 machine_mode addr_mode, machine_mode mem_mode,
4553			 addr_space_t as, bool speed)
4555    if (!USE_LOAD_PRE_DECREMENT (mem_mode)
4556	&& !USE_STORE_PRE_DECREMENT (mem_mode)
4557	&& !USE_LOAD_POST_DECREMENT (mem_mode)
4558	&& !USE_STORE_POST_DECREMENT (mem_mode)
4559	&& !USE_LOAD_PRE_INCREMENT (mem_mode)
4560	&& !USE_STORE_PRE_INCREMENT (mem_mode)
4561	&& !USE_LOAD_POST_INCREMENT (mem_mode)
4562	&& !USE_STORE_POST_INCREMENT (mem_mode))
4563      return infinite_cost;
4565    static vec<ainc_cost_data *> ainc_cost_data_list;
4566    unsigned idx = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
4567    if (idx >= ainc_cost_data_list.length ())
4569	unsigned nsize = ((unsigned) as + 1) * MAX_MACHINE_MODE;
4571	gcc_assert (nsize > idx);
4572	ainc_cost_data_list.safe_grow_cleared (nsize, true);
4575    ainc_cost_data *data = ainc_cost_data_list[idx];
4578	rtx reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
4580	data = (ainc_cost_data *) xcalloc (1, sizeof (*data));
4581	data->costs[AINC_PRE_DEC] = INFTY;
4582	data->costs[AINC_POST_DEC] = INFTY;
4583	data->costs[AINC_PRE_INC] = INFTY;
4584	data->costs[AINC_POST_INC] = INFTY;
4585	if (USE_LOAD_PRE_DECREMENT (mem_mode)
4586	    || USE_STORE_PRE_DECREMENT (mem_mode))
4588	    rtx addr = gen_rtx_PRE_DEC (addr_mode, reg);
4590	    if (memory_address_addr_space_p (mem_mode, addr, as))
4591	      data->costs[AINC_PRE_DEC]
4592		= address_cost (addr, mem_mode, as, speed);
4594	if (USE_LOAD_POST_DECREMENT (mem_mode)
4595	    || USE_STORE_POST_DECREMENT (mem_mode))
4597	    rtx addr = gen_rtx_POST_DEC (addr_mode, reg);
4599	    if (memory_address_addr_space_p (mem_mode, addr, as))
4600	      data->costs[AINC_POST_DEC]
4601		= address_cost (addr, mem_mode, as, speed);
4603	if (USE_LOAD_PRE_INCREMENT (mem_mode)
4604	    || USE_STORE_PRE_INCREMENT (mem_mode))
4606	    rtx addr = gen_rtx_PRE_INC (addr_mode, reg);
4608	    if (memory_address_addr_space_p (mem_mode, addr, as))
4609	      data->costs[AINC_PRE_INC]
4610		= address_cost (addr, mem_mode, as, speed);
4612	if (USE_LOAD_POST_INCREMENT (mem_mode)
4613	    || USE_STORE_POST_INCREMENT (mem_mode))
4615	    rtx addr = gen_rtx_POST_INC (addr_mode, reg);
4617	    if (memory_address_addr_space_p (mem_mode, addr, as))
4618	      data->costs[AINC_POST_INC]
4619		= address_cost (addr, mem_mode, as, speed);
4621	ainc_cost_data_list[idx] = data;
4624    poly_int64 msize = GET_MODE_SIZE (mem_mode);
4625    if (known_eq (ainc_offset, 0) && known_eq (msize, ainc_step))
4626      return comp_cost (data->costs[AINC_POST_INC], 0);
4627    if (known_eq (ainc_offset, 0) && known_eq (msize, -ainc_step))
4628      return comp_cost (data->costs[AINC_POST_DEC], 0);
4629    if (known_eq (ainc_offset, msize) && known_eq (msize, ainc_step))
4630      return comp_cost (data->costs[AINC_PRE_INC], 0);
4631    if (known_eq (ainc_offset, -msize) && known_eq (msize, -ainc_step))
4632      return comp_cost (data->costs[AINC_PRE_DEC], 0);
4634    return infinite_cost;
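/* Illustrative sketch (not part of GCC): the four known_eq checks above
   match the use's constant offset AINC_OFFSET and the address IV step
   AINC_STEP against the access size MSIZE.  The helper below restates
   that decision with plain integers; the enum and function names are
   hypothetical and exist only for illustration.  */

enum ainc_example_kind
{ EX_POST_INC, EX_POST_DEC, EX_PRE_INC, EX_PRE_DEC, EX_NONE };

static enum ainc_example_kind
ainc_example_classify (long ainc_step, long ainc_offset, long msize)
{
  if (ainc_offset == 0 && msize == ainc_step)
    return EX_POST_INC;		/* ... = *p; p += msize;  */
  if (ainc_offset == 0 && msize == -ainc_step)
    return EX_POST_DEC;		/* ... = *p; p -= msize;  */
  if (ainc_offset == msize && msize == ainc_step)
    return EX_PRE_INC;		/* p += msize; ... = *p;  */
  if (ainc_offset == -msize && msize == -ainc_step)
    return EX_PRE_DEC;		/* p -= msize; ... = *p;  */
  return EX_NONE;		/* Falls back to infinite_cost.  */
}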
4637  /* Return cost of computing USE's address expression by using CAND.
4638     AFF_INV and AFF_VAR represent invariant and variant parts of the
4639     address expression, respectively.  If AFF_INV is simple, store
4640     the loop invariant variables it depends on in INV_VARS;
4641     if AFF_INV is complicated, handle it as a new invariant expression
4642     and record it in INV_EXPR.  RATIO indicates the multiple between
4643     steps of USE and CAND.  If CAN_AUTOINC is non-NULL, store a boolean
4644     value to it indicating if this is an auto-increment address.  */
4647  get_address_cost (struct ivopts_data *data, struct iv_use *use,
4648		    struct iv_cand *cand, aff_tree *aff_inv,
4649		    aff_tree *aff_var, HOST_WIDE_INT ratio,
4650		    bitmap *inv_vars, iv_inv_expr_ent **inv_expr,
4651		    bool *can_autoinc, bool speed)
4654    bool simple_inv = true;
4655    tree comp_inv = NULL_TREE, type = aff_var->type;
4656    comp_cost var_cost = no_cost, cost = no_cost;
4657    struct mem_address parts = {NULL_TREE, integer_one_node,
4658				NULL_TREE, NULL_TREE, NULL_TREE};
4659    machine_mode addr_mode = TYPE_MODE (type);
4660    machine_mode mem_mode = TYPE_MODE (use->mem_type);
4661    addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
4662    /* Only true if ratio != 1.  */
4663    bool ok_with_ratio_p = false;
4664    bool ok_without_ratio_p = false;
4666 if (!aff_combination_const_p (aff_inv
))
4668 parts
.index
= integer_one_node
;
4669 /* Addressing mode "base + index". */
4670 ok_without_ratio_p
= valid_mem_ref_p (mem_mode
, as
, &parts
);
4673 parts
.step
= wide_int_to_tree (type
, ratio
);
4674 /* Addressing mode "base + index << scale". */
4675 ok_with_ratio_p
= valid_mem_ref_p (mem_mode
, as
, &parts
);
4676 if (!ok_with_ratio_p
)
4677 parts
.step
= NULL_TREE
;
4679 if (ok_with_ratio_p
|| ok_without_ratio_p
)
4681 if (maybe_ne (aff_inv
->offset
, 0))
4683 parts
.offset
= wide_int_to_tree (sizetype
, aff_inv
->offset
);
4684 /* Addressing mode "base + index [<< scale] + offset". */
4685 if (!valid_mem_ref_p (mem_mode
, as
, &parts
))
4686 parts
.offset
= NULL_TREE
;
4688 aff_inv
->offset
= 0;
4691 move_fixed_address_to_symbol (&parts
, aff_inv
);
4692 /* Base is fixed address and is moved to symbol part. */
4693 if (parts
.symbol
!= NULL_TREE
&& aff_combination_zero_p (aff_inv
))
4694 parts
.base
= NULL_TREE
;
4696 /* Addressing mode "symbol + base + index [<< scale] [+ offset]". */
4697 if (parts
.symbol
!= NULL_TREE
4698 && !valid_mem_ref_p (mem_mode
, as
, &parts
))
4700 aff_combination_add_elt (aff_inv
, parts
.symbol
, 1);
4701 parts
.symbol
= NULL_TREE
;
4702 /* Reset SIMPLE_INV since symbol address needs to be computed
4703 outside of address expression in this case. */
4705 /* Symbol part is moved back to base part, it can't be NULL. */
4706 parts
.base
= integer_one_node
;
4710 parts
.index
= NULL_TREE
;
4714 poly_int64 ainc_step
;
4717 && ptrdiff_tree_p (cand
->iv
->step
, &ainc_step
))
4719 poly_int64 ainc_offset
= (aff_inv
->offset
).force_shwi ();
4721 if (stmt_after_increment (data
->current_loop
, cand
, use
->stmt
))
4722 ainc_offset
+= ainc_step
;
4723 cost
= get_address_cost_ainc (ainc_step
, ainc_offset
,
4724 addr_mode
, mem_mode
, as
, speed
);
4725 if (!cost
.infinite_cost_p ())
4727 *can_autoinc
= true;
4732 if (!aff_combination_zero_p (aff_inv
))
4734 parts
.offset
= wide_int_to_tree (sizetype
, aff_inv
->offset
);
4735 /* Addressing mode "base + offset". */
4736 if (!valid_mem_ref_p (mem_mode
, as
, &parts
))
4737 parts
.offset
= NULL_TREE
;
4739 aff_inv
->offset
= 0;
4744 simple_inv
= (aff_inv
== NULL
4745 || aff_combination_const_p (aff_inv
)
4746 || aff_combination_singleton_var_p (aff_inv
));
4747 if (!aff_combination_zero_p (aff_inv
))
4748 comp_inv
= aff_combination_to_tree (aff_inv
);
4749 if (comp_inv
!= NULL_TREE
)
4750 cost
= force_var_cost (data
, comp_inv
, inv_vars
);
4751 if (ratio
!= 1 && parts
.step
== NULL_TREE
)
4752 var_cost
+= mult_by_coeff_cost (ratio
, addr_mode
, speed
);
4753 if (comp_inv
!= NULL_TREE
&& parts
.index
== NULL_TREE
)
4754 var_cost
+= add_cost (speed
, addr_mode
);
4756 if (comp_inv
&& inv_expr
&& !simple_inv
)
4758 *inv_expr
= get_loop_invariant_expr (data
, comp_inv
);
4759 /* Clear depends on. */
4760 if (*inv_expr
!= NULL
&& inv_vars
&& *inv_vars
)
4761 bitmap_clear (*inv_vars
);
4763      /* Cost of a small invariant expression adjusted against loop niters
4764	 is usually zero, which makes it difficult to differentiate
4765	 from a candidate based on loop invariant variables.  Secondly, the
4766	 generated invariant expression may not be hoisted out of the loop by
4767	 a following pass.  We penalize the cost by rounding up in order to
4768	 neutralize such effects.  */
4769 cost
.cost
= adjust_setup_cost (data
, cost
.cost
, true);
4770 cost
.scratch
= cost
.cost
;
4774    addr = addr_for_mem_ref (&parts, as, false);
4775    gcc_assert (memory_address_addr_space_p (mem_mode, addr, as));
4776    cost += address_cost (addr, mem_mode, as, speed);
4778    if (parts.symbol != NULL_TREE)
4779      cost.complexity += 1;
4780    /* Don't increase the complexity of adding a scaled index if it's
4781       the only kind of index that the target allows.  */
4782    if (parts.step != NULL_TREE && ok_without_ratio_p)
4783      cost.complexity += 1;
4784    if (parts.base != NULL_TREE && parts.index != NULL_TREE)
4785      cost.complexity += 1;
4786    if (parts.offset != NULL_TREE && !integer_zerop (parts.offset))
4787      cost.complexity += 1;
4792 /* Scale (multiply) the computed COST (except scratch part that should be
4793 hoisted out a loop) by header->frequency / AT->frequency, which makes
4794 expected cost more accurate. */
4797  get_scaled_computation_cost_at (ivopts_data *data, gimple *at, comp_cost cost)
4800      && data->current_loop->header->count.to_frequency (cfun) > 0)
4802      basic_block bb = gimple_bb (at);
4803      gcc_assert (cost.scratch <= cost.cost);
4804      int scale_factor = (int)(intptr_t) bb->aux;
4805      if (scale_factor == 1)
4809	= cost.scratch + (cost.cost - cost.scratch) * scale_factor;
4811      if (dump_file && (dump_flags & TDF_DETAILS))
4812	fprintf (dump_file, "Scaling cost based on bb prob by %2.2f: "
4813		 "%" PRId64 " (scratch: %" PRId64 ") -> %" PRId64 "\n",
4814		 1.0f * scale_factor, cost.cost, cost.scratch, scaled_cost);
4816      cost.cost = scaled_cost;
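/* Worked example (illustrative only, not part of GCC): with cost = 14,
   scratch = 4 and scale_factor = 3, the scaled cost computed above is
   4 + (14 - 4) * 3 = 34; the setup (scratch) part is expected to be
   hoisted out of the loop, so only the per-iteration remainder is
   multiplied by the frequency ratio cached in bb->aux.  The helper name
   is hypothetical.  */

static long
scaled_cost_example (long cost, long scratch, int scale_factor)
{
  /* Pay the setup once, scale the rest by the execution frequency.  */
  return scratch + (cost - scratch) * scale_factor;
}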
4822 /* Determines the cost of the computation by that USE is expressed
4823 from induction variable CAND. If ADDRESS_P is true, we just need
4824 to create an address from it, otherwise we want to get it into
4825 register. A set of invariants we depend on is stored in INV_VARS.
4826 If CAN_AUTOINC is nonnull, use it to record whether autoinc
4827 addressing is likely. If INV_EXPR is nonnull, record invariant
4828 expr entry in it. */
4831  get_computation_cost (struct ivopts_data *data, struct iv_use *use,
4832			struct iv_cand *cand, bool address_p, bitmap *inv_vars,
4833			bool *can_autoinc, iv_inv_expr_ent **inv_expr)
4835    gimple *at = use->stmt;
4836    tree ubase = use->iv->base, cbase = cand->iv->base;
4837    tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
4838    tree comp_inv = NULL_TREE;
4839    HOST_WIDE_INT ratio, aratio;
4842    aff_tree aff_inv, aff_var;
4843    bool speed = optimize_bb_for_speed_p (gimple_bb (at));
4848      *can_autoinc = false;
4852    /* Check if we have enough precision to express the values of use.  */
4853    if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
4854      return infinite_cost;
4857      || (use->iv->base_object
4858	  && cand->iv->base_object
4859	  && POINTER_TYPE_P (TREE_TYPE (use->iv->base_object))
4860	  && POINTER_TYPE_P (TREE_TYPE (cand->iv->base_object))))
4862      /* Do not try to express address of an object with computation based
4863	 on address of a different object.  This may cause problems in rtl
4864	 level alias analysis (that does not expect this to be happening,
4865	 as this is illegal in C), and would be unlikely to be useful
	 anyway.  */
4867      if (use->iv->base_object
4868	  && cand->iv->base_object
4869	  && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
4870	return infinite_cost;
4873    if (!get_computation_aff_1 (data->current_loop, at, use,
4874				cand, &aff_inv, &aff_var, &rat)
4875	|| !wi::fits_shwi_p (rat))
4876      return infinite_cost;
4878    ratio = rat.to_shwi ();
4881      cost = get_address_cost (data, use, cand, &aff_inv, &aff_var, ratio,
4882			       inv_vars, inv_expr, can_autoinc, speed);
4883      cost = get_scaled_computation_cost_at (data, at, cost);
4884      /* For doloop IV cand, add on the extra cost.  */
4885      cost += cand->doloop_p ? targetm.doloop_cost_for_address : 0;
4889    bool simple_inv = (aff_combination_const_p (&aff_inv)
4890		       || aff_combination_singleton_var_p (&aff_inv));
4891    tree signed_type = signed_type_for (aff_combination_type (&aff_inv));
4892    aff_combination_convert (&aff_inv, signed_type);
4893    if (!aff_combination_zero_p (&aff_inv))
4894      comp_inv = aff_combination_to_tree (&aff_inv);
4896      cost = force_var_cost (data, comp_inv, inv_vars);
4897    if (comp_inv && inv_expr && !simple_inv)
4899	*inv_expr = get_loop_invariant_expr (data, comp_inv);
4900	/* Clear depends on.  */
4901	if (*inv_expr != NULL && inv_vars && *inv_vars)
4902	  bitmap_clear (*inv_vars);
4904      cost.cost = adjust_setup_cost (data, cost.cost);
4905      /* Record setup cost in scratch field.  */
4906      cost.scratch = cost.cost;
4908    /* Cost of constant integer can be covered when adding invariant part to
       variant part.  */
4910    else if (comp_inv && CONSTANT_CLASS_P (comp_inv))
4913    /* Need type narrowing to represent use with cand.  */
4914    if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
4916	machine_mode outer_mode = TYPE_MODE (utype);
4917	machine_mode inner_mode = TYPE_MODE (ctype);
4918	cost += comp_cost (convert_cost (outer_mode, inner_mode, speed), 0);
4921    /* Turn a + i * (-c) into a - i * c.  */
4922    if (ratio < 0 && comp_inv && !integer_zerop (comp_inv))
4928    cost += mult_by_coeff_cost (aratio, TYPE_MODE (utype), speed);
4930    /* TODO: We may also need to check if we can compute a + i * 4 in one
       instruction.  */
4932    /* Need to add up the invariant and variant parts.  */
4933    if (comp_inv && !integer_zerop (comp_inv))
4934      cost += add_cost (speed, TYPE_MODE (utype));
4936    cost = get_scaled_computation_cost_at (data, at, cost);
4938    /* For doloop IV cand, add on the extra cost.  */
4939    if (cand->doloop_p && use->type == USE_NONLINEAR_EXPR)
4940      cost += targetm.doloop_cost_for_generic;
4945 /* Determines cost of computing the use in GROUP with CAND in a generic
4949  determine_group_iv_cost_generic (struct ivopts_data *data,
4950				   struct iv_group *group, struct iv_cand *cand)
4953    iv_inv_expr_ent *inv_expr = NULL;
4954    bitmap inv_vars = NULL, inv_exprs = NULL;
4955    struct iv_use *use = group->vuses[0];
4957    /* The simple case first -- if we need to express value of the preserved
4958       original biv, the cost is 0.  This also prevents us from counting the
4959       cost of increment twice -- once at this use and once in the cost of
       the candidate.  */
4961    if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
4964    cost = get_computation_cost (data, use, cand, false,
4965				 &inv_vars, NULL, &inv_expr);
4969	inv_exprs = BITMAP_ALLOC (NULL);
4970	bitmap_set_bit (inv_exprs, inv_expr->id);
4972    set_group_iv_cost (data, group, cand, cost, inv_vars,
4973		       NULL_TREE, ERROR_MARK, inv_exprs);
4974    return !cost.infinite_cost_p ();
4977 /* Determines cost of computing uses in GROUP with CAND in addresses. */
4980  determine_group_iv_cost_address (struct ivopts_data *data,
4981				   struct iv_group *group, struct iv_cand *cand)
4984    bitmap inv_vars = NULL, inv_exprs = NULL;
4986    iv_inv_expr_ent *inv_expr = NULL;
4987    struct iv_use *use = group->vuses[0];
4988    comp_cost sum_cost = no_cost, cost;
4990    cost = get_computation_cost (data, use, cand, true,
4991				 &inv_vars, &can_autoinc, &inv_expr);
4995	inv_exprs = BITMAP_ALLOC (NULL);
4996	bitmap_set_bit (inv_exprs, inv_expr->id);
4999    if (!sum_cost.infinite_cost_p () && cand->ainc_use == use)
5002	  sum_cost -= cand->cost_step;
5003	/* If we generated the candidate solely for exploiting autoincrement
5004	   opportunities, and it turns out it can't be used, set the cost to
5005	   infinity to make sure we ignore it.  */
5006	else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
5007	  sum_cost = infinite_cost;
5010    /* Uses in a group can share setup code, so only add setup cost once.  */
5011    cost -= cost.scratch;
5012    /* Compute and add costs for rest uses of this group.  */
5013    for (i = 1; i < group->vuses.length () && !sum_cost.infinite_cost_p (); i++)
5015	struct iv_use *next = group->vuses[i];
5017	/* TODO: We could skip computing cost for sub iv_use when it has the
5018	   same cost as the first iv_use, but the cost really depends on the
5019	   offset and where the iv_use is.  */
5020	cost = get_computation_cost (data, next, cand, true,
5021				     NULL, &can_autoinc, &inv_expr);
5025	    inv_exprs = BITMAP_ALLOC (NULL);
5027	    bitmap_set_bit (inv_exprs, inv_expr->id);
5031    set_group_iv_cost (data, group, cand, sum_cost, inv_vars,
5032		       NULL_TREE, ERROR_MARK, inv_exprs);
5034    return !sum_cost.infinite_cost_p ();
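/* Illustrative sketch (not part of GCC): if the three uses of a group
   cost 12, 9 and 9, each including a shared setup (scratch) part of 4,
   the group is charged 12 + (9 - 4) + (9 - 4) = 22 against the
   candidate rather than 30, because the setup code is emitted only
   once.  The helper and its plain arrays are hypothetical.  */

static long
group_cost_example (const long *use_costs, long scratch, unsigned n_uses)
{
  long sum = use_costs[0];		/* First use pays the full cost.  */
  for (unsigned i = 1; i < n_uses; i++)
    sum += use_costs[i] - scratch;	/* Later uses share the setup.  */
  return sum;
}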
5037 /* Computes value of candidate CAND at position AT in iteration NITER, and
5038 stores it to VAL. */
5041  cand_value_at (class loop *loop, struct iv_cand *cand, gimple *at, tree niter,
5044    aff_tree step, delta, nit;
5045    struct iv *iv = cand->iv;
5046    tree type = TREE_TYPE (iv->base);
5048    if (POINTER_TYPE_P (type))
5049      steptype = sizetype;
5051      steptype = unsigned_type_for (type);
5053    tree_to_aff_combination (iv->step, TREE_TYPE (iv->step), &step);
5054    aff_combination_convert (&step, steptype);
5055    tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
5056    aff_combination_convert (&nit, steptype);
5057    aff_combination_mult (&nit, &step, &delta);
5058    if (stmt_after_increment (loop, cand, at))
5059      aff_combination_add (&delta, &step);
5061    tree_to_aff_combination (iv->base, type, val);
5062    if (!POINTER_TYPE_P (type))
5063      aff_combination_convert (val, steptype);
5064    aff_combination_add (val, &delta);
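/* Worked example (illustrative, not part of GCC): for a candidate with
   base 16 and step 4, the value at iteration NITER computed above is
   16 + NITER * 4 before the candidate's increment and
   16 + (NITER + 1) * 4 after it.  The helper restates the affine
   computation with plain integers; its name is hypothetical.  */

static long
cand_value_at_example (long base, long step, long niter, int after_inc)
{
  long delta = niter * step;
  if (after_inc)
    delta += step;	/* Mirrors the stmt_after_increment check.  */
  return base + delta;
}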
5067 /* Returns period of induction variable iv. */
5070  iv_period (struct iv *iv)
5072    tree step = iv->step, period, type;
5075    gcc_assert (step && TREE_CODE (step) == INTEGER_CST);
5077    type = unsigned_type_for (TREE_TYPE (step));
5078    /* Period of the iv is lcm (step, type_range)/step -1,
5079       i.e., N*type_range/step - 1.  Since type range is power
5080       of two, N == (step >> num_of_ending_zeros_binary (step),
5081       so the final result is
5083	 (type_range >> num_of_ending_zeros_binary (step)) - 1.  */
5086    pow2div = num_ending_zeros (step);
5088    period = build_low_bits_mask (type,
5089				  (TYPE_PRECISION (type)
5090				   - tree_to_uhwi (pow2div)));
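/* Worked example (illustrative, not part of GCC): for a 32-bit unsigned
   iv with step 12 = 0b1100, num_ending_zeros is 2, so the period is
   (2^32 >> 2) - 1 = 2^30 - 1; only after that many increments could the
   iv wrap back to a previously seen value.  The helper assumes a
   nonzero step and a precision below 64; its name is hypothetical.  */

static unsigned long long
iv_period_example (unsigned precision, unsigned long long step)
{
  unsigned zeros = 0;
  while ((step & 1) == 0)	/* num_ending_zeros (step)  */
    {
      step >>= 1;
      zeros++;
    }
  /* build_low_bits_mask (type, precision - zeros)  */
  return (1ULL << (precision - zeros)) - 1;
}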
5095 /* Returns the comparison operator used when eliminating the iv USE. */
5097  static enum tree_code
5098  iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
5100    class loop *loop = data->current_loop;
5104    ex_bb = gimple_bb (use->stmt);
5105    exit = EDGE_SUCC (ex_bb, 0);
5106    if (flow_bb_inside_loop_p (loop, exit->dest))
5107      exit = EDGE_SUCC (ex_bb, 1);
5109    return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
5112 /* Returns true if we can prove that BASE - OFFSET does not overflow. For now,
5113 we only detect the situation that BASE = SOMETHING + OFFSET, where the
5114 calculation is performed in non-wrapping type.
5116 TODO: More generally, we could test for the situation that
5117 BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
5118 This would require knowing the sign of OFFSET. */
5121 difference_cannot_overflow_p (struct ivopts_data
*data
, tree base
, tree offset
)
5123 enum tree_code code
;
5125 aff_tree aff_e1
, aff_e2
, aff_offset
;
5127 if (!nowrap_type_p (TREE_TYPE (base
)))
5130 base
= expand_simple_operations (base
);
5132 if (TREE_CODE (base
) == SSA_NAME
)
5134 gimple
*stmt
= SSA_NAME_DEF_STMT (base
);
5136 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
5139 code
= gimple_assign_rhs_code (stmt
);
5140 if (get_gimple_rhs_class (code
) != GIMPLE_BINARY_RHS
)
5143 e1
= gimple_assign_rhs1 (stmt
);
5144 e2
= gimple_assign_rhs2 (stmt
);
5148 code
= TREE_CODE (base
);
5149 if (get_gimple_rhs_class (code
) != GIMPLE_BINARY_RHS
)
5151 e1
= TREE_OPERAND (base
, 0);
5152 e2
= TREE_OPERAND (base
, 1);
5155 /* Use affine expansion as deeper inspection to prove the equality. */
5156 tree_to_aff_combination_expand (e2
, TREE_TYPE (e2
),
5157 &aff_e2
, &data
->name_expansion_cache
);
5158 tree_to_aff_combination_expand (offset
, TREE_TYPE (offset
),
5159 &aff_offset
, &data
->name_expansion_cache
);
5160 aff_combination_scale (&aff_offset
, -1);
5164 aff_combination_add (&aff_e2
, &aff_offset
);
5165 if (aff_combination_zero_p (&aff_e2
))
5168 tree_to_aff_combination_expand (e1
, TREE_TYPE (e1
),
5169 &aff_e1
, &data
->name_expansion_cache
);
5170 aff_combination_add (&aff_e1
, &aff_offset
);
5171 return aff_combination_zero_p (&aff_e1
);
5173 case POINTER_PLUS_EXPR
:
5174 aff_combination_add (&aff_e2
, &aff_offset
);
5175 return aff_combination_zero_p (&aff_e2
);
5182 /* Tries to replace loop exit by one formulated in terms of a LT_EXPR
5183 comparison with CAND. NITER describes the number of iterations of
5184 the loops. If successful, the comparison in COMP_P is altered accordingly.
5186 We aim to handle the following situation:
5202 Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
5203 We aim to optimize this to
5211 while (p < p_0 - a + b);
5213 This preserves the correctness, since the pointer arithmetics does not
5214 overflow. More precisely:
5216 1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there is no
5217 overflow in computing it or the values of p.
5218 2) if a + 1 > b, then we need to verify that the expression p_0 - a does not
5219 overflow. To prove this, we use the fact that p_0 = base + a. */
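/* Illustrative sketch (not part of GCC; the loop example originally
   embedded in this comment did not survive extraction).  The pair of
   functions below shows, for the niter pattern described above, the
   shape of the exit test before and after the rewrite; all names are
   hypothetical and the loops assume b > a so that p stays within the
   array.  */

static void
lt_elim_before (int *base, int a, int b)
{
  int i = a;
  int *p = base + a;
  do
    {
      p[0] += 1;
      p++;
      i++;
    }
  while (i < b);		/* niter = (a + 1 > b) ? 0 : b - a - 1  */
}

static void
lt_elim_after (int *base, int a, int b)
{
  int *p_0 = base + a, *p = p_0;
  do
    {
      p[0] += 1;
      p++;
    }
  while (p < p_0 - a + b);	/* p_0 - a = base, so no overflow  */
}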
5222 iv_elimination_compare_lt (struct ivopts_data
*data
,
5223 struct iv_cand
*cand
, enum tree_code
*comp_p
,
5224 class tree_niter_desc
*niter
)
5226 tree cand_type
, a
, b
, mbz
, nit_type
= TREE_TYPE (niter
->niter
), offset
;
5227 class aff_tree nit
, tmpa
, tmpb
;
5228 enum tree_code comp
;
5231 /* We need to know that the candidate induction variable does not overflow.
5232 While more complex analysis may be used to prove this, for now just
5233 check that the variable appears in the original program and that it
5234 is computed in a type that guarantees no overflows. */
5235 cand_type
= TREE_TYPE (cand
->iv
->base
);
5236 if (cand
->pos
!= IP_ORIGINAL
|| !nowrap_type_p (cand_type
))
5239 /* Make sure that the loop iterates till the loop bound is hit, as otherwise
5240 the calculation of the BOUND could overflow, making the comparison
5242 if (!data
->loop_single_exit_p
)
5245 /* We need to be able to decide whether candidate is increasing or decreasing
5246 in order to choose the right comparison operator. */
5247 if (!cst_and_fits_in_hwi (cand
->iv
->step
))
5249 step
= int_cst_value (cand
->iv
->step
);
5251 /* Check that the number of iterations matches the expected pattern:
5252 a + 1 > b ? 0 : b - a - 1. */
5253 mbz
= niter
->may_be_zero
;
5254 if (TREE_CODE (mbz
) == GT_EXPR
)
5256 /* Handle a + 1 > b. */
5257 tree op0
= TREE_OPERAND (mbz
, 0);
5258 if (TREE_CODE (op0
) == PLUS_EXPR
&& integer_onep (TREE_OPERAND (op0
, 1)))
5260 a
= TREE_OPERAND (op0
, 0);
5261 b
= TREE_OPERAND (mbz
, 1);
5266 else if (TREE_CODE (mbz
) == LT_EXPR
)
5268 tree op1
= TREE_OPERAND (mbz
, 1);
5270 /* Handle b < a + 1. */
5271 if (TREE_CODE (op1
) == PLUS_EXPR
&& integer_onep (TREE_OPERAND (op1
, 1)))
5273 a
= TREE_OPERAND (op1
, 0);
5274 b
= TREE_OPERAND (mbz
, 0);
5282 /* Expected number of iterations is B - A - 1. Check that it matches
5283 the actual number, i.e., that B - A - NITER = 1. */
5284 tree_to_aff_combination (niter
->niter
, nit_type
, &nit
);
5285 tree_to_aff_combination (fold_convert (nit_type
, a
), nit_type
, &tmpa
);
5286 tree_to_aff_combination (fold_convert (nit_type
, b
), nit_type
, &tmpb
);
5287 aff_combination_scale (&nit
, -1);
5288 aff_combination_scale (&tmpa
, -1);
5289 aff_combination_add (&tmpb
, &tmpa
);
5290 aff_combination_add (&tmpb
, &nit
);
5291 if (tmpb
.n
!= 0 || maybe_ne (tmpb
.offset
, 1))
5294 /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
5296 offset
= fold_build2 (MULT_EXPR
, TREE_TYPE (cand
->iv
->step
),
5298 fold_convert (TREE_TYPE (cand
->iv
->step
), a
));
5299 if (!difference_cannot_overflow_p (data
, cand
->iv
->base
, offset
))
5302 /* Determine the new comparison operator. */
5303 comp
= step
< 0 ? GT_EXPR
: LT_EXPR
;
5304 if (*comp_p
== NE_EXPR
)
5306 else if (*comp_p
== EQ_EXPR
)
5307 *comp_p
= invert_tree_comparison (comp
, false);
5314 /* Check whether it is possible to express the condition in USE by comparison
5315 of candidate CAND. If so, store the value compared with to BOUND, and the
5316 comparison operator to COMP. */
5319 may_eliminate_iv (struct ivopts_data
*data
,
5320 struct iv_use
*use
, struct iv_cand
*cand
, tree
*bound
,
5321 enum tree_code
*comp
)
5326 class loop
*loop
= data
->current_loop
;
5328 class tree_niter_desc
*desc
= NULL
;
5330 if (TREE_CODE (cand
->iv
->step
) != INTEGER_CST
)
5333 /* For now works only for exits that dominate the loop latch.
5334 TODO: extend to other conditions inside loop body. */
5335 ex_bb
= gimple_bb (use
->stmt
);
5336 if (use
->stmt
!= last_stmt (ex_bb
)
5337 || gimple_code (use
->stmt
) != GIMPLE_COND
5338 || !dominated_by_p (CDI_DOMINATORS
, loop
->latch
, ex_bb
))
5341 exit
= EDGE_SUCC (ex_bb
, 0);
5342 if (flow_bb_inside_loop_p (loop
, exit
->dest
))
5343 exit
= EDGE_SUCC (ex_bb
, 1);
5344 if (flow_bb_inside_loop_p (loop
, exit
->dest
))
5347 desc
= niter_for_exit (data
, exit
);
5351 /* Determine whether we can use the variable to test the exit condition.
5352 This is the case iff the period of the induction variable is greater
5353 than the number of iterations for which the exit condition is true. */
5354 period
= iv_period (cand
->iv
);
5356 /* If the number of iterations is constant, compare against it directly. */
5357 if (TREE_CODE (desc
->niter
) == INTEGER_CST
)
5359 /* See cand_value_at. */
5360 if (stmt_after_increment (loop
, cand
, use
->stmt
))
5362 if (!tree_int_cst_lt (desc
->niter
, period
))
5367 if (tree_int_cst_lt (period
, desc
->niter
))
5372 /* If not, and if this is the only possible exit of the loop, see whether
5373 we can get a conservative estimate on the number of iterations of the
5374 entire loop and compare against that instead. */
5377 widest_int period_value
, max_niter
;
5379 max_niter
= desc
->max
;
5380 if (stmt_after_increment (loop
, cand
, use
->stmt
))
5382 period_value
= wi::to_widest (period
);
5383 if (wi::gtu_p (max_niter
, period_value
))
5385 /* See if we can take advantage of inferred loop bound
5387 if (data
->loop_single_exit_p
)
5389 if (!max_loop_iterations (loop
, &max_niter
))
5391 /* The loop bound is already adjusted by adding 1. */
5392 if (wi::gtu_p (max_niter
, period_value
))
5400 /* For doloop IV cand, the bound would be zero. It's safe whether
5401 may_be_zero set or not. */
5404 *bound
= build_int_cst (TREE_TYPE (cand
->iv
->base
), 0);
5405 *comp
= iv_elimination_compare (data
, use
);
5409 cand_value_at (loop
, cand
, use
->stmt
, desc
->niter
, &bnd
);
5411 *bound
= fold_convert (TREE_TYPE (cand
->iv
->base
),
5412 aff_combination_to_tree (&bnd
));
5413 *comp
= iv_elimination_compare (data
, use
);
5415 /* It is unlikely that computing the number of iterations using division
5416 would be more profitable than keeping the original induction variable. */
5417 if (expression_expensive_p (*bound
))
5420 /* Sometimes, it is possible to handle the situation that the number of
5421 iterations may be zero unless additional assumptions by using <
5422 instead of != in the exit condition.
5424 TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
5425 base the exit condition on it. However, that is often too
5427 if (!integer_zerop (desc
->may_be_zero
))
5428 return iv_elimination_compare_lt (data
, cand
, comp
, desc
);
5433 /* Calculates the cost of BOUND, if it is a PARM_DECL. A PARM_DECL must
5434 be copied, if it is used in the loop body and DATA->body_includes_call. */
5437  parm_decl_cost (struct ivopts_data *data, tree bound)
5439    tree sbound = bound;
5440    STRIP_NOPS (sbound);
5442    if (TREE_CODE (sbound) == SSA_NAME
5443	&& SSA_NAME_IS_DEFAULT_DEF (sbound)
5444	&& TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
5445	&& data->body_includes_call)
5446      return COSTS_N_INSNS (1);
5451 /* Determines cost of computing the use in GROUP with CAND in a condition. */
5454 determine_group_iv_cost_cond (struct ivopts_data
*data
,
5455 struct iv_group
*group
, struct iv_cand
*cand
)
5457 tree bound
= NULL_TREE
;
5459 bitmap inv_exprs
= NULL
;
5460 bitmap inv_vars_elim
= NULL
, inv_vars_express
= NULL
, inv_vars
;
5461 comp_cost elim_cost
= infinite_cost
, express_cost
, cost
, bound_cost
;
5462 enum comp_iv_rewrite rewrite_type
;
5463 iv_inv_expr_ent
*inv_expr_elim
= NULL
, *inv_expr_express
= NULL
, *inv_expr
;
5464 tree
*control_var
, *bound_cst
;
5465 enum tree_code comp
= ERROR_MARK
;
5466 struct iv_use
*use
= group
->vuses
[0];
5468 /* Extract condition operands. */
5469 rewrite_type
= extract_cond_operands (data
, use
->stmt
, &control_var
,
5470 &bound_cst
, NULL
, &cmp_iv
);
5471 gcc_assert (rewrite_type
!= COMP_IV_NA
);
5473 /* Try iv elimination. */
5474 if (rewrite_type
== COMP_IV_ELIM
5475 && may_eliminate_iv (data
, use
, cand
, &bound
, &comp
))
5477 elim_cost
= force_var_cost (data
, bound
, &inv_vars_elim
);
5478 if (elim_cost
.cost
== 0)
5479 elim_cost
.cost
= parm_decl_cost (data
, bound
);
5480 else if (TREE_CODE (bound
) == INTEGER_CST
)
5482 /* If we replace a loop condition 'i < n' with 'p < base + n',
5483 inv_vars_elim will have 'base' and 'n' set, which implies that both
5484 'base' and 'n' will be live during the loop. More likely,
5485 'base + n' will be loop invariant, resulting in only one live value
5486 during the loop. So in that case we clear inv_vars_elim and set
5487 inv_expr_elim instead. */
5488 if (inv_vars_elim
&& bitmap_count_bits (inv_vars_elim
) > 1)
5490 inv_expr_elim
= get_loop_invariant_expr (data
, bound
);
5491 bitmap_clear (inv_vars_elim
);
5493 /* The bound is a loop invariant, so it will be only computed
5495 elim_cost
.cost
= adjust_setup_cost (data
, elim_cost
.cost
);
5498 /* When the condition is a comparison of the candidate IV against
5499 zero, prefer this IV.
5501 TODO: The constant that we're subtracting from the cost should
5502 be target-dependent. This information should be added to the
5503 target costs for each backend. */
5504 if (!elim_cost
.infinite_cost_p () /* Do not try to decrease infinite! */
5505 && integer_zerop (*bound_cst
)
5506 && (operand_equal_p (*control_var
, cand
->var_after
, 0)
5507 || operand_equal_p (*control_var
, cand
->var_before
, 0)))
5510 express_cost
= get_computation_cost (data
, use
, cand
, false,
5511 &inv_vars_express
, NULL
,
5514 find_inv_vars (data
, &cmp_iv
->base
, &inv_vars_express
);
5516 /* Count the cost of the original bound as well. */
5517 bound_cost
= force_var_cost (data
, *bound_cst
, NULL
);
5518 if (bound_cost
.cost
== 0)
5519 bound_cost
.cost
= parm_decl_cost (data
, *bound_cst
);
5520 else if (TREE_CODE (*bound_cst
) == INTEGER_CST
)
5521 bound_cost
.cost
= 0;
5522 express_cost
+= bound_cost
;
5524 /* Choose the better approach, preferring the eliminated IV. */
5525 if (elim_cost
<= express_cost
)
5528 inv_vars
= inv_vars_elim
;
5529 inv_vars_elim
= NULL
;
5530 inv_expr
= inv_expr_elim
;
5531 /* For doloop candidate/use pair, adjust to zero cost. */
5532 if (group
->doloop_p
&& cand
->doloop_p
&& elim_cost
.cost
> no_cost
.cost
)
5537 cost
= express_cost
;
5538 inv_vars
= inv_vars_express
;
5539 inv_vars_express
= NULL
;
5542 inv_expr
= inv_expr_express
;
5547 inv_exprs
= BITMAP_ALLOC (NULL
);
5548 bitmap_set_bit (inv_exprs
, inv_expr
->id
);
5550 set_group_iv_cost (data
, group
, cand
, cost
,
5551 inv_vars
, bound
, comp
, inv_exprs
);
5554 BITMAP_FREE (inv_vars_elim
);
5555 if (inv_vars_express
)
5556 BITMAP_FREE (inv_vars_express
);
5558 return !cost
.infinite_cost_p ();
5561 /* Determines cost of computing uses in GROUP with CAND. Returns false
5562 if USE cannot be represented with CAND. */
5565 determine_group_iv_cost (struct ivopts_data
*data
,
5566 struct iv_group
*group
, struct iv_cand
*cand
)
5568 switch (group
->type
)
5570 case USE_NONLINEAR_EXPR
:
5571 return determine_group_iv_cost_generic (data
, group
, cand
);
5573 case USE_REF_ADDRESS
:
5574 case USE_PTR_ADDRESS
:
5575 return determine_group_iv_cost_address (data
, group
, cand
);
5578 return determine_group_iv_cost_cond (data
, group
, cand
);
5585 /* Return true if get_computation_cost indicates that autoincrement is
5586 a possibility for the pair of USE and CAND, false otherwise. */
5589 autoinc_possible_for_pair (struct ivopts_data
*data
, struct iv_use
*use
,
5590 struct iv_cand
*cand
)
5592 if (!address_p (use
->type
))
5595 bool can_autoinc
= false;
5596 get_computation_cost (data
, use
, cand
, true, NULL
, &can_autoinc
, NULL
);
5600 /* Examine IP_ORIGINAL candidates to see if they are incremented next to a
5601 use that allows autoincrement, and set their AINC_USE if possible. */
5604 set_autoinc_for_original_candidates (struct ivopts_data
*data
)
5608 for (i
= 0; i
< data
->vcands
.length (); i
++)
5610 struct iv_cand
*cand
= data
->vcands
[i
];
5611 struct iv_use
*closest_before
= NULL
;
5612 struct iv_use
*closest_after
= NULL
;
5613 if (cand
->pos
!= IP_ORIGINAL
)
5616 for (j
= 0; j
< data
->vgroups
.length (); j
++)
5618 struct iv_group
*group
= data
->vgroups
[j
];
5619 struct iv_use
*use
= group
->vuses
[0];
5620 unsigned uid
= gimple_uid (use
->stmt
);
5622 if (gimple_bb (use
->stmt
) != gimple_bb (cand
->incremented_at
))
5625 if (uid
< gimple_uid (cand
->incremented_at
)
5626 && (closest_before
== NULL
5627 || uid
> gimple_uid (closest_before
->stmt
)))
5628 closest_before
= use
;
5630 if (uid
> gimple_uid (cand
->incremented_at
)
5631 && (closest_after
== NULL
5632 || uid
< gimple_uid (closest_after
->stmt
)))
5633 closest_after
= use
;
5636 if (closest_before
!= NULL
5637 && autoinc_possible_for_pair (data
, closest_before
, cand
))
5638 cand
->ainc_use
= closest_before
;
5639 else if (closest_after
!= NULL
5640 && autoinc_possible_for_pair (data
, closest_after
, cand
))
5641 cand
->ainc_use
= closest_after
;
5645 /* Relate compare use with all candidates. */
5648 relate_compare_use_with_all_cands (struct ivopts_data
*data
)
5650 unsigned i
, count
= data
->vcands
.length ();
5651 for (i
= 0; i
< data
->vgroups
.length (); i
++)
5653 struct iv_group
*group
= data
->vgroups
[i
];
5655 if (group
->type
== USE_COMPARE
)
5656 bitmap_set_range (group
->related_cands
, 0, count
);
5660  /* If PREFERRED_MODE is suitable and profitable, use it to compute the
5661     doloop iv base from niter: base = niter + 1.  */
5664 compute_doloop_base_on_mode (machine_mode preferred_mode
, tree niter
,
5665 const widest_int
&iterations_max
)
5667 tree ntype
= TREE_TYPE (niter
);
5668 tree pref_type
= lang_hooks
.types
.type_for_mode (preferred_mode
, 1);
5670 return fold_build2 (PLUS_EXPR
, ntype
, unshare_expr (niter
),
5671 build_int_cst (ntype
, 1));
5673 gcc_assert (TREE_CODE (pref_type
) == INTEGER_TYPE
);
5675 int prec
= TYPE_PRECISION (ntype
);
5676 int pref_prec
= TYPE_PRECISION (pref_type
);
5680    /* Check if PREFERRED_MODE is able to represent niter.  */
5681 if (pref_prec
> prec
5682 || wi::ltu_p (iterations_max
,
5683 widest_int::from (wi::max_value (pref_prec
, UNSIGNED
),
5686 /* No wrap, it is safe to use preferred type after niter + 1. */
5687 if (wi::ltu_p (iterations_max
,
5688 widest_int::from (wi::max_value (prec
, UNSIGNED
),
5691	  /* This could help to optimize a "-1 +1" pair when niter looks
5692	     like "n-1": n is in the original mode.  "base = (n - 1) + 1"
5693	     in PREFERRED_MODE: it could be base = (PREFERRED_TYPE)n.  */
5694	  base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5695			      build_int_cst (ntype, 1));
5696	  base = fold_convert (pref_type, base);
5699      /* To avoid wrap, convert niter to preferred type before plus 1.  */
5702	  niter = fold_convert (pref_type, niter);
5703	  base = fold_build2 (PLUS_EXPR, pref_type, unshare_expr (niter),
5704			      build_int_cst (pref_type, 1));
5708    base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5709			build_int_cst (ntype, 1));
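/* Illustrative sketch (not part of GCC): the mode selection above boils
   down to the decision below, shown with plain precisions instead of
   machine modes.  ITER_MAX stands for niter_desc->max; the helper name,
   its parameters and the return codes are hypothetical, and the exact
   guarding conditions (partly lost above) are an assumption.  */

static int
doloop_base_mode_example (int prec, int pref_prec,
			  unsigned long long iter_max)
{
  unsigned long long pref_max
    = pref_prec >= 64 ? ~0ULL : (1ULL << pref_prec) - 1;
  unsigned long long orig_max
    = prec >= 64 ? ~0ULL : (1ULL << prec) - 1;

  /* The preferred type is usable only if it is wider than the original
     one or can still represent the iteration count.  */
  if (pref_prec <= prec && iter_max >= pref_max)
    return 0;		/* keep original type: base = niter + 1  */

  /* If niter + 1 cannot wrap in the original type, compute it there and
     convert only the result (lets "(n - 1) + 1" fold to n).  */
  if (iter_max < orig_max)
    return 1;		/* base = (pref_type) (niter + 1)  */

  /* Otherwise convert first so that the + 1 cannot wrap.  */
  return 2;		/* base = (pref_type) niter + 1  */
}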
5713 /* Add one doloop dedicated IV candidate:
5714 - Base is (may_be_zero ? 1 : (niter + 1)).
5718 add_iv_candidate_for_doloop (struct ivopts_data
*data
)
5720 tree_niter_desc
*niter_desc
= niter_for_single_dom_exit (data
);
5721 gcc_assert (niter_desc
&& niter_desc
->assumptions
);
5723 tree niter
= niter_desc
->niter
;
5724 tree ntype
= TREE_TYPE (niter
);
5725 gcc_assert (TREE_CODE (ntype
) == INTEGER_TYPE
);
5727 tree may_be_zero
= niter_desc
->may_be_zero
;
5728 if (may_be_zero
&& integer_zerop (may_be_zero
))
5729 may_be_zero
= NULL_TREE
;
5732 if (COMPARISON_CLASS_P (may_be_zero
))
5734 niter
= fold_build3 (COND_EXPR
, ntype
, may_be_zero
,
5735 build_int_cst (ntype
, 0),
5736 rewrite_to_non_trapping_overflow (niter
));
5738 /* Don't try to obtain the iteration count expression when may_be_zero is
5739 integer_nonzerop (actually iteration count is one) or else. */
5744 machine_mode mode
= TYPE_MODE (ntype
);
5745 machine_mode pref_mode
= targetm
.preferred_doloop_mode (mode
);
5748 if (mode
!= pref_mode
)
5750 base
= compute_doloop_base_on_mode (pref_mode
, niter
, niter_desc
->max
);
5751 ntype
= TREE_TYPE (base
);
5754 base
= fold_build2 (PLUS_EXPR
, ntype
, unshare_expr (niter
),
5755 build_int_cst (ntype
, 1));
5758 add_candidate (data
, base
, build_int_cst (ntype
, -1), true, NULL
, NULL
, true);
5761 /* Finds the candidates for the induction variables. */
5764 find_iv_candidates (struct ivopts_data
*data
)
5766 /* Add commonly used ivs. */
5767 add_standard_iv_candidates (data
);
5769 /* Add doloop dedicated ivs. */
5770 if (data
->doloop_use_p
)
5771 add_iv_candidate_for_doloop (data
);
5773 /* Add old induction variables. */
5774 add_iv_candidate_for_bivs (data
);
5776 /* Add induction variables derived from uses. */
5777 add_iv_candidate_for_groups (data
);
5779 set_autoinc_for_original_candidates (data
);
5781 /* Record the important candidates. */
5782 record_important_candidates (data
);
5784 /* Relate compare iv_use with all candidates. */
5785 if (!data
->consider_all_candidates
)
5786 relate_compare_use_with_all_cands (data
);
5788 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5792 fprintf (dump_file
, "\n<Important Candidates>:\t");
5793 for (i
= 0; i
< data
->vcands
.length (); i
++)
5794 if (data
->vcands
[i
]->important
)
5795 fprintf (dump_file
, " %d,", data
->vcands
[i
]->id
);
5796 fprintf (dump_file
, "\n");
5798 fprintf (dump_file
, "\n<Group, Cand> Related:\n");
5799 for (i
= 0; i
< data
->vgroups
.length (); i
++)
5801 struct iv_group
*group
= data
->vgroups
[i
];
5803 if (group
->related_cands
)
5805 fprintf (dump_file
, " Group %d:\t", group
->id
);
5806 dump_bitmap (dump_file
, group
->related_cands
);
5809 fprintf (dump_file
, "\n");
5813 /* Determines costs of computing use of iv with an iv candidate. */
5816 determine_group_iv_costs (struct ivopts_data
*data
)
5819 struct iv_cand
*cand
;
5820 struct iv_group
*group
;
5821 bitmap to_clear
= BITMAP_ALLOC (NULL
);
5823 alloc_use_cost_map (data
);
5825 for (i
= 0; i
< data
->vgroups
.length (); i
++)
5827 group
= data
->vgroups
[i
];
5829 if (data
->consider_all_candidates
)
5831 for (j
= 0; j
< data
->vcands
.length (); j
++)
5833 cand
= data
->vcands
[j
];
5834 determine_group_iv_cost (data
, group
, cand
);
5841 EXECUTE_IF_SET_IN_BITMAP (group
->related_cands
, 0, j
, bi
)
5843 cand
= data
->vcands
[j
];
5844 if (!determine_group_iv_cost (data
, group
, cand
))
5845 bitmap_set_bit (to_clear
, j
);
5848 /* Remove the candidates for that the cost is infinite from
5849 the list of related candidates. */
5850 bitmap_and_compl_into (group
->related_cands
, to_clear
);
5851 bitmap_clear (to_clear
);
5855 BITMAP_FREE (to_clear
);
5857 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5861 /* Dump invariant variables. */
5862 fprintf (dump_file
, "\n<Invariant Vars>:\n");
5863 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, i
, bi
)
5865 struct version_info
*info
= ver_info (data
, i
);
5868 fprintf (dump_file
, "Inv %d:\t", info
->inv_id
);
5869 print_generic_expr (dump_file
, info
->name
, TDF_SLIM
);
5870 fprintf (dump_file
, "%s\n",
5871 info
->has_nonlin_use
? "" : "\t(eliminable)");
5875 /* Dump invariant expressions. */
5876 fprintf (dump_file
, "\n<Invariant Expressions>:\n");
5877 auto_vec
<iv_inv_expr_ent
*> list (data
->inv_expr_tab
->elements ());
5879 for (hash_table
<iv_inv_expr_hasher
>::iterator it
5880 = data
->inv_expr_tab
->begin (); it
!= data
->inv_expr_tab
->end ();
5882 list
.safe_push (*it
);
5884 list
.qsort (sort_iv_inv_expr_ent
);
5886 for (i
= 0; i
< list
.length (); ++i
)
5888 fprintf (dump_file
, "inv_expr %d: \t", list
[i
]->id
);
5889 print_generic_expr (dump_file
, list
[i
]->expr
, TDF_SLIM
);
5890 fprintf (dump_file
, "\n");
5893 fprintf (dump_file
, "\n<Group-candidate Costs>:\n");
5895 for (i
= 0; i
< data
->vgroups
.length (); i
++)
5897 group
= data
->vgroups
[i
];
5899 fprintf (dump_file
, "Group %d:\n", i
);
5900 fprintf (dump_file
, " cand\tcost\tcompl.\tinv.expr.\tinv.vars\n");
5901 for (j
= 0; j
< group
->n_map_members
; j
++)
5903 if (!group
->cost_map
[j
].cand
5904 || group
->cost_map
[j
].cost
.infinite_cost_p ())
5907 fprintf (dump_file
, " %d\t%" PRId64
"\t%d\t",
5908 group
->cost_map
[j
].cand
->id
,
5909 group
->cost_map
[j
].cost
.cost
,
5910 group
->cost_map
[j
].cost
.complexity
);
5911 if (!group
->cost_map
[j
].inv_exprs
5912 || bitmap_empty_p (group
->cost_map
[j
].inv_exprs
))
5913 fprintf (dump_file
, "NIL;\t");
5915 bitmap_print (dump_file
,
5916 group
->cost_map
[j
].inv_exprs
, "", ";\t");
5917 if (!group
->cost_map
[j
].inv_vars
5918 || bitmap_empty_p (group
->cost_map
[j
].inv_vars
))
5919 fprintf (dump_file
, "NIL;\n");
5921 bitmap_print (dump_file
,
5922 group
->cost_map
[j
].inv_vars
, "", "\n");
5925 fprintf (dump_file
, "\n");
5927 fprintf (dump_file
, "\n");
5931 /* Determines cost of the candidate CAND. */
5934 determine_iv_cost (struct ivopts_data
*data
, struct iv_cand
*cand
)
5936 comp_cost cost_base
;
5937 int64_t cost
, cost_step
;
5940 gcc_assert (cand
->iv
!= NULL
);
5942 /* There are two costs associated with the candidate -- its increment
5943 and its initialization. The second is almost negligible for any loop
5944 that rolls enough, so we take it just very little into account. */
5946 base
= cand
->iv
->base
;
5947 cost_base
= force_var_cost (data
, base
, NULL
);
5948 /* It will be exceptional that the iv register happens to be initialized with
5949 the proper value at no cost. In general, there will at least be a regcopy
5951 if (cost_base
.cost
== 0)
5952 cost_base
.cost
= COSTS_N_INSNS (1);
5953 /* Doloop decrement should be considered as zero cost. */
5957 cost_step
= add_cost (data
->speed
, TYPE_MODE (TREE_TYPE (base
)));
5958 cost
= cost_step
+ adjust_setup_cost (data
, cost_base
.cost
);
5960 /* Prefer the original ivs unless we may gain something by replacing it.
5961 The reason is to make debugging simpler; so this is not relevant for
5962 artificial ivs created by other optimization passes. */
5963 if ((cand
->pos
!= IP_ORIGINAL
5964 || !SSA_NAME_VAR (cand
->var_before
)
5965 || DECL_ARTIFICIAL (SSA_NAME_VAR (cand
->var_before
)))
5966 /* Prefer doloop as well. */
5970 /* Prefer not to insert statements into latch unless there are some
5971 already (so that we do not create unnecessary jumps). */
5972 if (cand
->pos
== IP_END
5973 && empty_block_p (ip_end_pos (data
->current_loop
)))
5977 cand
->cost_step
= cost_step
;
5980 /* Determines costs of computation of the candidates. */
5983 determine_iv_costs (struct ivopts_data
*data
)
5987 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5989 fprintf (dump_file
, "<Candidate Costs>:\n");
5990 fprintf (dump_file
, " cand\tcost\n");
5993 for (i
= 0; i
< data
->vcands
.length (); i
++)
5995 struct iv_cand
*cand
= data
->vcands
[i
];
5997 determine_iv_cost (data
, cand
);
5999 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6000 fprintf (dump_file
, " %d\t%d\n", i
, cand
->cost
);
6003 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6004 fprintf (dump_file
, "\n");
6007 /* Estimate register pressure for loop having N_INVS invariants and N_CANDS
6008 induction variables. Note N_INVS includes both invariant variables and
6009 invariant expressions. */
6012  ivopts_estimate_reg_pressure (struct ivopts_data *data, unsigned n_invs,
6016    unsigned n_old = data->regs_used, n_new = n_invs + n_cands;
6017    unsigned regs_needed = n_new + n_old, available_regs = target_avail_regs;
6018    bool speed = data->speed;
6020    /* If there is a call in the loop body, the call-clobbered registers
6021       are not available for loop invariants.  */
6022    if (data->body_includes_call)
6023      available_regs = available_regs - target_clobbered_regs;
6025    /* If we have enough registers.  */
6026    if (regs_needed + target_res_regs < available_regs)
6028    /* If close to running out of registers, try to preserve them.  */
6029    else if (regs_needed <= available_regs)
6030      cost = target_reg_cost [speed] * regs_needed;
6031    /* If we run out of available registers but the number of candidates
6032       does not, we penalize extra registers using target_spill_cost.  */
6033    else if (n_cands <= available_regs)
6034      cost = target_reg_cost [speed] * available_regs
6035	     + target_spill_cost [speed] * (regs_needed - available_regs);
6036    /* If the number of candidates exceeds the available registers as well,
6037       we penalize extra candidate registers using target_spill_cost * 2,
6038       because it is more expensive to spill an induction variable than an
       invariant.  */
6040      cost = target_reg_cost [speed] * available_regs
6041	     + target_spill_cost [speed] * (n_cands - available_regs) * 2
6042	     + target_spill_cost [speed] * (regs_needed - n_cands);
6044    /* Finally, add the number of candidates, so that we prefer eliminating
6045       induction variables if possible.  */
6046    return cost + n_cands;
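/* Worked example (illustrative, not part of GCC): with 24 available
   registers, 10 already used in the loop and target_res_regs = 3, a set
   of 8 candidates plus 4 invariants needs 22 registers; 22 + 3 < 24
   fails but 22 <= 24 holds, so the estimate is target_reg_cost * 22
   plus the number of candidates.  The helper restates the tiers with
   hypothetical scalar parameters; the cheap first tier is an
   assumption, since its body is not visible above.  */

static unsigned
reg_pressure_example (unsigned n_old, unsigned n_invs, unsigned n_cands,
		      unsigned available_regs, unsigned res_regs,
		      unsigned reg_cost, unsigned spill_cost)
{
  unsigned regs_needed = n_old + n_invs + n_cands;
  unsigned cost;

  if (regs_needed + res_regs < available_regs)
    cost = n_invs + n_cands;			/* cheap tier (assumed)  */
  else if (regs_needed <= available_regs)
    cost = reg_cost * regs_needed;		/* getting close  */
  else if (n_cands <= available_regs)
    cost = reg_cost * available_regs		/* spill some invariants  */
	   + spill_cost * (regs_needed - available_regs);
  else
    cost = reg_cost * available_regs		/* spill some ivs as well  */
	   + spill_cost * (n_cands - available_regs) * 2
	   + spill_cost * (regs_needed - n_cands);

  return cost + n_cands;	/* prefer eliminating induction variables  */
}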
6049 /* For each size of the induction variable set determine the penalty. */
6052 determine_set_costs (struct ivopts_data
*data
)
6058 class loop
*loop
= data
->current_loop
;
6061 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6063 fprintf (dump_file
, "<Global Costs>:\n");
6064 fprintf (dump_file
, " target_avail_regs %d\n", target_avail_regs
);
6065 fprintf (dump_file
, " target_clobbered_regs %d\n", target_clobbered_regs
);
6066 fprintf (dump_file
, " target_reg_cost %d\n", target_reg_cost
[data
->speed
]);
6067 fprintf (dump_file
, " target_spill_cost %d\n", target_spill_cost
[data
->speed
]);
6071 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
6074 op
= PHI_RESULT (phi
);
6076 if (virtual_operand_p (op
))
6079 if (get_iv (data
, op
))
6082 if (!POINTER_TYPE_P (TREE_TYPE (op
))
6083 && !INTEGRAL_TYPE_P (TREE_TYPE (op
)))
6089 EXECUTE_IF_SET_IN_BITMAP (data
->relevant
, 0, j
, bi
)
6091 struct version_info
*info
= ver_info (data
, j
);
6093 if (info
->inv_id
&& info
->has_nonlin_use
)
6097 data
->regs_used
= n
;
6098 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6099 fprintf (dump_file
, " regs_used %d\n", n
);
6101 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6103 fprintf (dump_file
, " cost for size:\n");
6104 fprintf (dump_file
, " ivs\tcost\n");
6105 for (j
= 0; j
<= 2 * target_avail_regs
; j
++)
6106 fprintf (dump_file
, " %d\t%d\n", j
,
6107 ivopts_estimate_reg_pressure (data
, 0, j
));
6108 fprintf (dump_file
, "\n");
6112 /* Returns true if A is a cheaper cost pair than B. */
6115 cheaper_cost_pair (class cost_pair
*a
, class cost_pair
*b
)
6123 if (a
->cost
< b
->cost
)
6126 if (b
->cost
< a
->cost
)
6129 /* In case the costs are the same, prefer the cheaper candidate. */
6130 if (a
->cand
->cost
< b
->cand
->cost
)
6136 /* Compare if A is a more expensive cost pair than B. Return 1, 0 and -1
6137 for more expensive, equal and cheaper respectively. */
6140 compare_cost_pair (class cost_pair
*a
, class cost_pair
*b
)
6142 if (cheaper_cost_pair (a
, b
))
6144 if (cheaper_cost_pair (b
, a
))
6150 /* Returns candidate by that USE is expressed in IVS. */
6152 static class cost_pair
*
6153 iv_ca_cand_for_group (class iv_ca
*ivs
, struct iv_group
*group
)
6155 return ivs
->cand_for_group
[group
->id
];
6158 /* Computes the cost field of IVS structure. */
6161 iv_ca_recount_cost (struct ivopts_data
*data
, class iv_ca
*ivs
)
6163 comp_cost cost
= ivs
->cand_use_cost
;
6165 cost
+= ivs
->cand_cost
;
6166 cost
+= ivopts_estimate_reg_pressure (data
, ivs
->n_invs
, ivs
->n_cands
);
6170 /* Remove use of invariants in set INVS by decreasing counter in N_INV_USES
6174 iv_ca_set_remove_invs (class iv_ca
*ivs
, bitmap invs
, unsigned *n_inv_uses
)
6182 gcc_assert (n_inv_uses
!= NULL
);
6183 EXECUTE_IF_SET_IN_BITMAP (invs
, 0, iid
, bi
)
6186 if (n_inv_uses
[iid
] == 0)
6191 /* Set USE not to be expressed by any candidate in IVS. */
6194 iv_ca_set_no_cp (struct ivopts_data
*data
, class iv_ca
*ivs
,
6195 struct iv_group
*group
)
6197 unsigned gid
= group
->id
, cid
;
6198 class cost_pair
*cp
;
6200 cp
= ivs
->cand_for_group
[gid
];
6206 ivs
->cand_for_group
[gid
] = NULL
;
6207 ivs
->n_cand_uses
[cid
]--;
6209 if (ivs
->n_cand_uses
[cid
] == 0)
6211 bitmap_clear_bit (ivs
->cands
, cid
);
6212 if (!cp
->cand
->doloop_p
|| !targetm
.have_count_reg_decr_p
)
6214 ivs
->cand_cost
-= cp
->cand
->cost
;
6215 iv_ca_set_remove_invs (ivs
, cp
->cand
->inv_vars
, ivs
->n_inv_var_uses
);
6216 iv_ca_set_remove_invs (ivs
, cp
->cand
->inv_exprs
, ivs
->n_inv_expr_uses
);
6219 ivs
->cand_use_cost
-= cp
->cost
;
6220 iv_ca_set_remove_invs (ivs
, cp
->inv_vars
, ivs
->n_inv_var_uses
);
6221 iv_ca_set_remove_invs (ivs
, cp
->inv_exprs
, ivs
->n_inv_expr_uses
);
6222 iv_ca_recount_cost (data
, ivs
);
6225 /* Add use of invariants in set INVS by increasing counter in N_INV_USES and
6229 iv_ca_set_add_invs (class iv_ca
*ivs
, bitmap invs
, unsigned *n_inv_uses
)
6237 gcc_assert (n_inv_uses
!= NULL
);
6238 EXECUTE_IF_SET_IN_BITMAP (invs
, 0, iid
, bi
)
6241 if (n_inv_uses
[iid
] == 1)
6246 /* Set cost pair for GROUP in set IVS to CP. */
6249 iv_ca_set_cp (struct ivopts_data
*data
, class iv_ca
*ivs
,
6250 struct iv_group
*group
, class cost_pair
*cp
)
6252 unsigned gid
= group
->id
, cid
;
6254 if (ivs
->cand_for_group
[gid
] == cp
)
6257 if (ivs
->cand_for_group
[gid
])
6258 iv_ca_set_no_cp (data
, ivs
, group
);
6265 ivs
->cand_for_group
[gid
] = cp
;
6266 ivs
->n_cand_uses
[cid
]++;
6267 if (ivs
->n_cand_uses
[cid
] == 1)
6269 bitmap_set_bit (ivs
->cands
, cid
);
6270 if (!cp
->cand
->doloop_p
|| !targetm
.have_count_reg_decr_p
)
6272 ivs
->cand_cost
+= cp
->cand
->cost
;
6273 iv_ca_set_add_invs (ivs
, cp
->cand
->inv_vars
, ivs
->n_inv_var_uses
);
6274 iv_ca_set_add_invs (ivs
, cp
->cand
->inv_exprs
, ivs
->n_inv_expr_uses
);
6277 ivs
->cand_use_cost
+= cp
->cost
;
6278 iv_ca_set_add_invs (ivs
, cp
->inv_vars
, ivs
->n_inv_var_uses
);
6279 iv_ca_set_add_invs (ivs
, cp
->inv_exprs
, ivs
->n_inv_expr_uses
);
6280 iv_ca_recount_cost (data
, ivs
);
6284 /* Extend set IVS by expressing USE by some of the candidates in it
6285 if possible. Consider all important candidates if candidates in
6286 set IVS don't give any result. */
6289 iv_ca_add_group (struct ivopts_data
*data
, class iv_ca
*ivs
,
6290 struct iv_group
*group
)
6292 class cost_pair
*best_cp
= NULL
, *cp
;
6295 struct iv_cand
*cand
;
6297 gcc_assert (ivs
->upto
>= group
->id
);
6301 EXECUTE_IF_SET_IN_BITMAP (ivs
->cands
, 0, i
, bi
)
6303 cand
= data
->vcands
[i
];
6304 cp
= get_group_iv_cost (data
, group
, cand
);
6305 if (cheaper_cost_pair (cp
, best_cp
))
6309 if (best_cp
== NULL
)
6311 EXECUTE_IF_SET_IN_BITMAP (data
->important_candidates
, 0, i
, bi
)
6313 cand
= data
->vcands
[i
];
6314 cp
= get_group_iv_cost (data
, group
, cand
);
6315 if (cheaper_cost_pair (cp
, best_cp
))
6320 iv_ca_set_cp (data
, ivs
, group
, best_cp
);
6323 /* Get cost for assignment IVS. */
6326 iv_ca_cost (class iv_ca
*ivs
)
6328 /* This was a conditional expression but it triggered a bug in
6330 if (ivs
->bad_groups
)
6331 return infinite_cost
;
6336 /* Compare if applying NEW_CP to GROUP for IVS introduces more invariants
6337 than OLD_CP. Return 1, 0 and -1 for more, equal and fewer invariants
6341 iv_ca_compare_deps (struct ivopts_data
*data
, class iv_ca
*ivs
,
6342 struct iv_group
*group
, class cost_pair
*old_cp
,
6343 class cost_pair
*new_cp
)
6345 gcc_assert (old_cp
&& new_cp
&& old_cp
!= new_cp
);
6346 unsigned old_n_invs
= ivs
->n_invs
;
6347 iv_ca_set_cp (data
, ivs
, group
, new_cp
);
6348 unsigned new_n_invs
= ivs
->n_invs
;
6349 iv_ca_set_cp (data
, ivs
, group
, old_cp
);
6351 return new_n_invs
> old_n_invs
? 1 : (new_n_invs
< old_n_invs
? -1 : 0);
6354 /* Creates change of expressing GROUP by NEW_CP instead of OLD_CP and chains
6357 static struct iv_ca_delta
*
6358 iv_ca_delta_add (struct iv_group
*group
, class cost_pair
*old_cp
,
6359 class cost_pair
*new_cp
, struct iv_ca_delta
*next
)
6361 struct iv_ca_delta
*change
= XNEW (struct iv_ca_delta
);
6363 change
->group
= group
;
6364 change
->old_cp
= old_cp
;
6365 change
->new_cp
= new_cp
;
6366 change
->next
= next
;
6371 /* Joins two lists of changes L1 and L2. Destructive -- old lists
6374 static struct iv_ca_delta
*
6375 iv_ca_delta_join (struct iv_ca_delta
*l1
, struct iv_ca_delta
*l2
)
6377 struct iv_ca_delta
*last
;
6385 for (last
= l1
; last
->next
; last
= last
->next
)
6392 /* Reverse the list of changes DELTA, forming the inverse to it. */
6394 static struct iv_ca_delta
*
6395 iv_ca_delta_reverse (struct iv_ca_delta
*delta
)
6397 struct iv_ca_delta
*act
, *next
, *prev
= NULL
;
6399 for (act
= delta
; act
; act
= next
)
6405 std::swap (act
->old_cp
, act
->new_cp
);
6411 /* Commit changes in DELTA to IVS. If FORWARD is false, the changes are
6412 reverted instead. */
6415 iv_ca_delta_commit (struct ivopts_data
*data
, class iv_ca
*ivs
,
6416 struct iv_ca_delta
*delta
, bool forward
)
6418 class cost_pair
*from
, *to
;
6419 struct iv_ca_delta
*act
;
6422 delta
= iv_ca_delta_reverse (delta
);
6424 for (act
= delta
; act
; act
= act
->next
)
6428 gcc_assert (iv_ca_cand_for_group (ivs
, act
->group
) == from
);
6429 iv_ca_set_cp (data
, ivs
, act
->group
, to
);
6433 iv_ca_delta_reverse (delta
);
6436 /* Returns true if CAND is used in IVS. */
6439 iv_ca_cand_used_p (class iv_ca
*ivs
, struct iv_cand
*cand
)
6441 return ivs
->n_cand_uses
[cand
->id
] > 0;
6444 /* Returns number of induction variable candidates in the set IVS. */
6447 iv_ca_n_cands (class iv_ca
*ivs
)
6449 return ivs
->n_cands
;
6452 /* Free the list of changes DELTA. */
6455 iv_ca_delta_free (struct iv_ca_delta
**delta
)
6457 struct iv_ca_delta
*act
, *next
;
6459 for (act
= *delta
; act
; act
= next
)
6468 /* Allocates new iv candidates assignment. */
6470 static class iv_ca
*
6471 iv_ca_new (struct ivopts_data
*data
)
6473 class iv_ca
*nw
= XNEW (class iv_ca
);
6477 nw
->cand_for_group
= XCNEWVEC (class cost_pair
*,
6478 data
->vgroups
.length ());
6479 nw
->n_cand_uses
= XCNEWVEC (unsigned, data
->vcands
.length ());
6480 nw
->cands
= BITMAP_ALLOC (NULL
);
6483 nw
->cand_use_cost
= no_cost
;
6485 nw
->n_inv_var_uses
= XCNEWVEC (unsigned, data
->max_inv_var_id
+ 1);
6486 nw
->n_inv_expr_uses
= XCNEWVEC (unsigned, data
->max_inv_expr_id
+ 1);
6492 /* Free memory occupied by the set IVS. */
6495 iv_ca_free (class iv_ca
**ivs
)
6497 free ((*ivs
)->cand_for_group
);
6498 free ((*ivs
)->n_cand_uses
);
6499 BITMAP_FREE ((*ivs
)->cands
);
6500 free ((*ivs
)->n_inv_var_uses
);
6501 free ((*ivs
)->n_inv_expr_uses
);
6506 /* Dumps IVS to FILE. */
6509 iv_ca_dump (struct ivopts_data
*data
, FILE *file
, class iv_ca
*ivs
)
6512 comp_cost cost
= iv_ca_cost (ivs
);
6514 fprintf (file
, " cost: %" PRId64
" (complexity %d)\n", cost
.cost
,
6516 fprintf (file
, " reg_cost: %d\n",
6517 ivopts_estimate_reg_pressure (data
, ivs
->n_invs
, ivs
->n_cands
));
6518 fprintf (file
, " cand_cost: %" PRId64
"\n cand_group_cost: "
6519 "%" PRId64
" (complexity %d)\n", ivs
->cand_cost
,
6520 ivs
->cand_use_cost
.cost
, ivs
->cand_use_cost
.complexity
);
6521 bitmap_print (file
, ivs
->cands
, " candidates: ","\n");
6523 for (i
= 0; i
< ivs
->upto
; i
++)
6525 struct iv_group
*group
= data
->vgroups
[i
];
6526 class cost_pair
*cp
= iv_ca_cand_for_group (ivs
, group
);
6528 fprintf (file
, " group:%d --> iv_cand:%d, cost=("
6529 "%" PRId64
",%d)\n", group
->id
, cp
->cand
->id
,
6530 cp
->cost
.cost
, cp
->cost
.complexity
);
6532 fprintf (file
, " group:%d --> ??\n", group
->id
);
6535 const char *pref
= "";
6536 fprintf (file
, " invariant variables: ");
6537 for (i
= 1; i
<= data
->max_inv_var_id
; i
++)
6538 if (ivs
->n_inv_var_uses
[i
])
6540 fprintf (file
, "%s%d", pref
, i
);
6545 fprintf (file
, "\n invariant expressions: ");
6546 for (i
= 1; i
<= data
->max_inv_expr_id
; i
++)
6547 if (ivs
->n_inv_expr_uses
[i
])
6549 fprintf (file
, "%s%d", pref
, i
);
6553 fprintf (file
, "\n\n");

/* Try changing candidate in IVS to CAND for each use.  Return cost of the
   new set, and store differences in DELTA.  Number of induction variables
   in the new set is stored to N_IVS.  MIN_NCAND is a flag.  When it is true
   the function will try to find a solution with minimal iv candidates.  */

static comp_cost
iv_ca_extend (struct ivopts_data *data, class iv_ca *ivs,
	      struct iv_cand *cand, struct iv_ca_delta **delta,
	      unsigned *n_ivs, bool min_ncand)
{
  unsigned i;
  comp_cost cost;
  struct iv_group *group;
  class cost_pair *old_cp, *new_cp;

  *delta = NULL;
  for (i = 0; i < ivs->upto; i++)
    {
      group = data->vgroups[i];
      old_cp = iv_ca_cand_for_group (ivs, group);

      if (old_cp
	  && old_cp->cand == cand)
	continue;

      new_cp = get_group_iv_cost (data, group, cand);
      if (!new_cp)
	continue;

      if (!min_ncand)
	{
	  int cmp_invs = iv_ca_compare_deps (data, ivs, group, old_cp, new_cp);
	  /* Skip if new_cp depends on more invariants.  */
	  if (cmp_invs > 0)
	    continue;

	  int cmp_cost = compare_cost_pair (new_cp, old_cp);
	  /* Skip if new_cp is not cheaper.  */
	  if (cmp_cost > 0 || (cmp_cost == 0 && cmp_invs == 0))
	    continue;
	}

      *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  if (n_ivs)
    *n_ivs = iv_ca_n_cands (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}

/* Try narrowing set IVS by removing CAND.  Return the cost of
   the new set and store the differences in DELTA.  START is
   the candidate with which we start narrowing.  */

static comp_cost
iv_ca_narrow (struct ivopts_data *data, class iv_ca *ivs,
	      struct iv_cand *cand, struct iv_cand *start,
	      struct iv_ca_delta **delta)
{
  unsigned i, ci;
  struct iv_group *group;
  class cost_pair *old_cp, *new_cp, *cp;
  bitmap_iterator bi;
  struct iv_cand *cnd;
  comp_cost cost, best_cost, acost;

  *delta = NULL;
  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];

      old_cp = iv_ca_cand_for_group (ivs, group);
      if (old_cp->cand != cand)
	continue;

      best_cost = iv_ca_cost (ivs);
      /* Start narrowing with START.  */
      new_cp = get_group_iv_cost (data, group, start);

      if (data->consider_all_candidates)
	{
	  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id || (start && ci == start->id))
		continue;

	      cnd = data->vcands[ci];

	      cp = get_group_iv_cost (data, group, cnd);
	      if (!cp)
		continue;

	      iv_ca_set_cp (data, ivs, group, cp);
	      acost = iv_ca_cost (ivs);

	      if (acost < best_cost)
		{
		  best_cost = acost;
		  new_cp = cp;
		}
	    }
	}
      else
	{
	  EXECUTE_IF_AND_IN_BITMAP (group->related_cands, ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id || (start && ci == start->id))
		continue;

	      cnd = data->vcands[ci];

	      cp = get_group_iv_cost (data, group, cnd);
	      if (!cp)
		continue;

	      iv_ca_set_cp (data, ivs, group, cp);
	      acost = iv_ca_cost (ivs);

	      if (acost < best_cost)
		{
		  best_cost = acost;
		  new_cp = cp;
		}
	    }
	}
      /* Restore to old cp for use.  */
      iv_ca_set_cp (data, ivs, group, old_cp);

      if (!new_cp)
	{
	  iv_ca_delta_free (delta);
	  return infinite_cost;
	}

      *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}

/* Try optimizing the set of candidates IVS by removing candidates different
   from EXCEPT_CAND from it.  Return cost of the new set, and store
   differences in DELTA.  */

static comp_cost
iv_ca_prune (struct ivopts_data *data, class iv_ca *ivs,
	     struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
  bitmap_iterator bi;
  struct iv_ca_delta *act_delta, *best_delta;
  unsigned i;
  comp_cost best_cost, acost;
  struct iv_cand *cand;

  best_delta = NULL;
  best_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      cand = data->vcands[i];

      if (cand == except_cand)
	continue;

      acost = iv_ca_narrow (data, ivs, cand, except_cand, &act_delta);

      if (acost < best_cost)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      *delta = NULL;
      return best_cost;
    }

  /* Recurse to possibly remove other unnecessary ivs.  */
  iv_ca_delta_commit (data, ivs, best_delta, true);
  best_cost = iv_ca_prune (data, ivs, except_cand, delta);
  iv_ca_delta_commit (data, ivs, best_delta, false);
  *delta = iv_ca_delta_join (best_delta, *delta);
  return best_cost;
}
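
/* Illustrative sketch (not part of the pass): the pruning above is a greedy
   "remove the member whose removal lowers the cost the most, then start
   over" loop.  A standalone analogue on a plain bitmask, with a
   caller-supplied cost function; sketch_prune and cost_fn are hypothetical
   names and do not correspond to IVOPTS internals.  */

static inline unsigned
sketch_prune (unsigned set, int n, int (*cost_fn) (unsigned))
{
  int improved = 1;
  while (improved)
    {
      improved = 0;
      int best_cost = cost_fn (set);
      int best_i = -1;
      for (int i = 0; i < n; i++)
	if ((set >> i) & 1)
	  {
	    int c = cost_fn (set & ~(1u << i));
	    if (c < best_cost)
	      {
		best_cost = c;
		best_i = i;
	      }
	  }
      if (best_i >= 0)
	{
	  /* Drop the best removal and rescan, mirroring the recursion
	     in iv_ca_prune.  */
	  set &= ~(1u << best_i);
	  improved = 1;
	}
    }
  return set;
}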

/* Check if CAND_IDX is a candidate other than OLD_CAND and has
   cheaper local cost for GROUP than BEST_CP.  Return pointer to
   the corresponding cost_pair, otherwise just return BEST_CP.  */

static class cost_pair *
cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
			unsigned int cand_idx, struct iv_cand *old_cand,
			class cost_pair *best_cp)
{
  struct iv_cand *cand;
  class cost_pair *cp;

  gcc_assert (old_cand != NULL && best_cp != NULL);
  if (cand_idx == old_cand->id)
    return best_cp;

  cand = data->vcands[cand_idx];
  cp = get_group_iv_cost (data, group, cand);
  if (cp != NULL && cheaper_cost_pair (cp, best_cp))
    return cp;

  return best_cp;
}

/* Try breaking the locally optimal fixed-point for IVS by replacing
   candidates that are used by more than one iv use.  For each of those
   candidates, this function tries to represent the iv uses under that
   candidate using other candidates with lower local cost, then tries to
   prune the new set.  If the new set has lower cost, it returns the new
   cost after recording the candidate replacement in list DELTA.  */

static comp_cost
iv_ca_replace (struct ivopts_data *data, class iv_ca *ivs,
	       struct iv_ca_delta **delta)
{
  bitmap_iterator bi, bj;
  unsigned int i, j, k;
  struct iv_cand *cand;
  comp_cost orig_cost, acost;
  struct iv_ca_delta *act_delta, *tmp_delta;
  class cost_pair *old_cp, *best_cp = NULL;

  *delta = NULL;
  orig_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      if (ivs->n_cand_uses[i] == 1
	  || ivs->n_cand_uses[i] > ALWAYS_PRUNE_CAND_SET_BOUND)
	continue;

      cand = data->vcands[i];

      act_delta = NULL;
      /* Represent uses under current candidate using other ones with
	 lower local cost.  */
      for (j = 0; j < ivs->upto; j++)
	{
	  struct iv_group *group = data->vgroups[j];
	  old_cp = iv_ca_cand_for_group (ivs, group);

	  if (old_cp->cand != cand)
	    continue;

	  best_cp = old_cp;
	  if (data->consider_all_candidates)
	    for (k = 0; k < data->vcands.length (); k++)
	      best_cp = cheaper_cost_with_cand (data, group, k,
						old_cp->cand, best_cp);
	  else
	    EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, k, bj)
	      best_cp = cheaper_cost_with_cand (data, group, k,
						old_cp->cand, best_cp);

	  if (best_cp == old_cp)
	    continue;

	  act_delta = iv_ca_delta_add (group, old_cp, best_cp, act_delta);
	}
      /* No need for further prune.  */
      if (!act_delta)
	continue;

      /* Prune the new candidate set.  */
      iv_ca_delta_commit (data, ivs, act_delta, true);
      acost = iv_ca_prune (data, ivs, NULL, &tmp_delta);
      iv_ca_delta_commit (data, ivs, act_delta, false);
      act_delta = iv_ca_delta_join (act_delta, tmp_delta);

      if (acost < orig_cost)
	{
	  *delta = act_delta;
	  return acost;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  return orig_cost;
}

/* Tries to extend the set IVS in the best possible way in order to
   express the GROUP.  If ORIGINALP is true, prefer candidates from
   the original set of IVs, otherwise favor important candidates not
   based on any memory object.  */

static bool
try_add_cand_for (struct ivopts_data *data, class iv_ca *ivs,
		  struct iv_group *group, bool originalp)
{
  comp_cost best_cost, act_cost;
  unsigned i;
  bitmap_iterator bi;
  struct iv_cand *cand;
  struct iv_ca_delta *best_delta = NULL, *act_delta;
  class cost_pair *cp;

  iv_ca_add_group (data, ivs, group);
  best_cost = iv_ca_cost (ivs);
  cp = iv_ca_cand_for_group (ivs, group);
  if (cp)
    {
      best_delta = iv_ca_delta_add (group, NULL, cp, NULL);
      iv_ca_set_no_cp (data, ivs, group);
    }

  /* If ORIGINALP is true, try to find the original IV for the use.  Otherwise
     first try important candidates not based on any memory object.  Only if
     this fails, try the specific ones.  Rationale -- in loops with many
     variables the best choice often is to use just one generic biv.  If we
     added here many ivs specific to the uses, the optimization algorithm later
     would be likely to get stuck in a local minimum, thus causing us to create
     too many ivs.  The approach from few ivs to more seems more likely to be
     successful -- starting from few ivs, replacing an expensive use by a
     specific iv should always be a win.  */
  EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, i, bi)
    {
      cand = data->vcands[i];

      if (originalp && cand->pos != IP_ORIGINAL)
	continue;

      if (!originalp && cand->iv->base_object != NULL_TREE)
	continue;

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      cp = get_group_iv_cost (data, group, cand);
      if (!cp)
	continue;

      iv_ca_set_cp (data, ivs, group, cp);
      act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
			       true);
      iv_ca_set_no_cp (data, ivs, group);
      act_delta = iv_ca_delta_add (group, NULL, cp, act_delta);

      if (act_cost < best_cost)
	{
	  best_cost = act_cost;

	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (best_cost.infinite_cost_p ())
    {
      for (i = 0; i < group->n_map_members; i++)
	{
	  cp = group->cost_map + i;
	  cand = cp->cand;
	  if (!cand)
	    continue;

	  /* Already tried this.  */
	  if (cand->important)
	    {
	      if (originalp && cand->pos == IP_ORIGINAL)
		continue;
	      if (!originalp && cand->iv->base_object == NULL_TREE)
		continue;
	    }

	  if (iv_ca_cand_used_p (ivs, cand))
	    continue;

	  act_delta = NULL;
	  iv_ca_set_cp (data, ivs, group, cp);
	  act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
	  iv_ca_set_no_cp (data, ivs, group);
	  act_delta = iv_ca_delta_add (group,
				       iv_ca_cand_for_group (ivs, group),
				       cp, act_delta);

	  if (act_cost < best_cost)
	    {
	      best_cost = act_cost;

	      iv_ca_delta_free (&best_delta);
	      best_delta = act_delta;
	    }
	  else
	    iv_ca_delta_free (&act_delta);
	}
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  iv_ca_delta_free (&best_delta);

  return !best_cost.infinite_cost_p ();
}
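
/* Illustrative sketch (not part of the pass): the rationale comment above
   amounts to a two-phase search -- first try a small preferred subset of
   candidates, and fall back to the full set only when nothing preferred
   fits.  The names below (sketch_pick_candidate, fits) are hypothetical;
   the real code scores whole candidate sets rather than single picks.  */

static inline int
sketch_pick_candidate (const int *preferred, int n_pref,
		       const int *all, int n_all,
		       int (*fits) (int))
{
  for (int i = 0; i < n_pref; i++)
    if (fits (preferred[i]))
      return preferred[i];	/* Preferred subset first.  */
  for (int i = 0; i < n_all; i++)
    if (fits (all[i]))
      return all[i];		/* Fall back to the full set.  */
  return -1;			/* No usable candidate at all.  */
}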

/* Finds an initial assignment of candidates to uses.  */

static class iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
  unsigned i;
  class iv_ca *ivs = iv_ca_new (data);

  for (i = 0; i < data->vgroups.length (); i++)
    if (!try_add_cand_for (data, ivs, data->vgroups[i], originalp))
      {
	iv_ca_free (&ivs);
	return NULL;
      }

  return ivs;
}

/* Tries to improve the set of induction variables IVS.  TRY_REPLACE_P
   points to a bool variable; if it is true, this function also tries to
   break a locally optimal fixed-point by replacing candidates in IVS.  */

static bool
try_improve_iv_set (struct ivopts_data *data,
		    class iv_ca *ivs, bool *try_replace_p)
{
  unsigned i, n_ivs;
  comp_cost acost, best_cost = iv_ca_cost (ivs);
  struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
  struct iv_cand *cand;

  /* Try extending the set of induction variables by one.  */
  for (i = 0; i < data->vcands.length (); i++)
    {
      cand = data->vcands[i];

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
      if (!act_delta)
	continue;

      /* If we successfully added the candidate and the set is small enough,
	 try optimizing it by removing other candidates.  */
      if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
	{
	  iv_ca_delta_commit (data, ivs, act_delta, true);
	  acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
	  iv_ca_delta_commit (data, ivs, act_delta, false);
	  act_delta = iv_ca_delta_join (act_delta, tmp_delta);
	}

      if (acost < best_cost)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      /* Try removing the candidates from the set instead.  */
      best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);

      if (!best_delta && *try_replace_p)
	{
	  *try_replace_p = false;
	  /* So far the candidate selection algorithm tends to choose fewer
	     IVs so that it can handle cases in which loops have many
	     variables but the best choice is often to use only one general
	     biv.  One weakness is that it can't handle the opposite case,
	     in which different candidates should be chosen with respect to
	     each use.  To solve the problem, we replace candidates in the
	     manner described by the comments of iv_ca_replace, thus giving
	     the general algorithm a chance to break the locally optimal
	     fixed-point in these cases.  */
	  best_cost = iv_ca_replace (data, ivs, &best_delta);
	}
    }

  if (!best_delta)
    return false;

  iv_ca_delta_commit (data, ivs, best_delta, true);
  iv_ca_delta_free (&best_delta);
  return best_cost == iv_ca_cost (ivs);
}

/* Attempts to find the optimal set of induction variables.  We use a simple
   greedy heuristic: we try to replace at most one candidate in the selected
   solution and remove the unused ivs while this improves the cost.  */

static class iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
  class iv_ca *set;
  bool try_replace_p = true;

  /* Get the initial solution.  */
  set = get_initial_solution (data, originalp);
  if (!set)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
      return NULL;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Initial set of candidates:\n");
      iv_ca_dump (data, dump_file, set);
    }

  while (try_improve_iv_set (data, set, &try_replace_p))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Improved to:\n");
	  iv_ca_dump (data, dump_file, set);
	}
    }

  /* If the set has infinite_cost, it can't be optimal.  */
  if (iv_ca_cost (set).infinite_cost_p ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Overflow to infinite cost in try_improve_iv_set.\n");
      iv_ca_free (&set);
    }
  return set;
}
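
/* Illustrative sketch (not part of the pass): the driver above is a plain
   greedy local search -- repeatedly try adding one unused candidate, keep
   the single best addition if it lowers the cost, and stop at the first
   fixed point.  The standalone analogue below works on a bitmask with a
   caller-supplied cost function; sketch_greedy_select and cost_fn are
   hypothetical names, not IVOPTS internals.  */

static inline unsigned
sketch_greedy_select (int n_cands, int (*cost_fn) (unsigned))
{
  unsigned set = 0;
  for (;;)
    {
      int best_cost = cost_fn (set);
      int best_i = -1;
      for (int i = 0; i < n_cands; i++)
	if (!((set >> i) & 1))
	  {
	    int c = cost_fn (set | (1u << i));
	    if (c < best_cost)
	      {
		best_cost = c;
		best_i = i;
	      }
	  }
      if (best_i < 0)
	return set;		/* Local optimum reached.  */
      set |= 1u << best_i;	/* Keep the cheapest single addition.  */
    }
}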

static class iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
  unsigned i;
  comp_cost cost, origcost;
  class iv_ca *set, *origset;

  /* Determine the cost based on a strategy that starts with original IVs,
     and try again using a strategy that prefers candidates not based
     on any IVs.  */
  origset = find_optimal_iv_set_1 (data, true);
  set = find_optimal_iv_set_1 (data, false);

  if (!origset && !set)
    return NULL;

  origcost = origset ? iv_ca_cost (origset) : infinite_cost;
  cost = set ? iv_ca_cost (set) : infinite_cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Original cost %" PRId64 " (complexity %d)\n\n",
	       origcost.cost, origcost.complexity);
      fprintf (dump_file, "Final cost %" PRId64 " (complexity %d)\n\n",
	       cost.cost, cost.complexity);
    }

  /* Choose the one with the best cost.  */
  if (origcost <= cost)
    {
      if (set)
	iv_ca_free (&set);
      set = origset;
    }
  else if (origset)
    iv_ca_free (&origset);

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];
      group->selected = iv_ca_cand_for_group (set, group)->cand;
    }

  return set;
}

/* Creates a new induction variable corresponding to CAND.  */

static void
create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
{
  gimple_stmt_iterator incr_pos;
  tree base;
  struct iv_use *use;
  struct iv_group *group;
  bool after = false;

  gcc_assert (cand->iv != NULL);

  switch (cand->pos)
    {
    case IP_NORMAL:
      incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
      break;

    case IP_END:
      incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
      after = true;
      break;

    case IP_AFTER_USE:
      after = true;
      /* fall through */
    case IP_BEFORE_USE:
      incr_pos = gsi_for_stmt (cand->incremented_at);
      break;

    case IP_ORIGINAL:
      /* Mark that the iv is preserved.  */
      name_info (data, cand->var_before)->preserve_biv = true;
      name_info (data, cand->var_after)->preserve_biv = true;

      /* Rewrite the increment so that it uses var_before directly.  */
      use = find_interesting_uses_op (data, cand->var_after);
      group = data->vgroups[use->group_id];
      group->selected = cand;
      return;
    }

  gimple_add_tmp_var (cand->var_before);

  base = unshare_expr (cand->iv->base);

  create_iv (base, unshare_expr (cand->iv->step),
	     cand->var_before, data->current_loop,
	     &incr_pos, after, &cand->var_before, &cand->var_after);
}

/* Creates new induction variables described in SET.  */

static void
create_new_ivs (struct ivopts_data *data, class iv_ca *set)
{
  unsigned i;
  struct iv_cand *cand;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
    {
      cand = data->vcands[i];
      create_new_iv (data, cand);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Selected IV set for loop %d",
	       data->current_loop->num);
      if (data->loop_loc != UNKNOWN_LOCATION)
	fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
		 LOCATION_LINE (data->loop_loc));
      fprintf (dump_file, ", " HOST_WIDE_INT_PRINT_DEC " avg niters",
	       avg_loop_niter (data->current_loop));
      fprintf (dump_file, ", %lu IVs:\n", bitmap_count_bits (set->cands));
      EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
	{
	  cand = data->vcands[i];
	  dump_cand (dump_file, cand);
	}
      fprintf (dump_file, "\n");
    }
}

/* Rewrites USE (definition of iv used in a nonlinear expression)
   using candidate CAND.  */

static void
rewrite_use_nonlinear_expr (struct ivopts_data *data,
			    struct iv_use *use, struct iv_cand *cand)
{
  gassign *ass;
  gimple_stmt_iterator bsi;
  tree comp, type = get_use_type (use), tgt;

  /* An important special case -- if we are asked to express value of
     the original iv by itself, just exit; there is no need to
     introduce a new computation (that might also need casting the
     variable to unsigned and back).  */
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      tree op = NULL_TREE;
      enum tree_code stmt_code;

      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);

      /* Check whether we may leave the computation unchanged.
	 This is the case only if it does not rely on other
	 computations in the loop -- otherwise, the computation
	 we rely upon may be removed in remove_unused_ivs,
	 thus leading to ICE.  */
      stmt_code = gimple_assign_rhs_code (use->stmt);
      if (stmt_code == PLUS_EXPR
	  || stmt_code == MINUS_EXPR
	  || stmt_code == POINTER_PLUS_EXPR)
	{
	  if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
	    op = gimple_assign_rhs2 (use->stmt);
	  else if (gimple_assign_rhs2 (use->stmt) == cand->var_before)
	    op = gimple_assign_rhs1 (use->stmt);
	}

      if (op != NULL_TREE)
	{
	  if (expr_invariant_in_loop_p (data->current_loop, op))
	    return;
	  if (TREE_CODE (op) == SSA_NAME)
	    {
	      struct iv *iv = get_iv (data, op);
	      if (iv != NULL && integer_zerop (iv->step))
		return;
	    }
	}
    }

  switch (gimple_code (use->stmt))
    {
    case GIMPLE_PHI:
      tgt = PHI_RESULT (use->stmt);

      /* If we should keep the biv, do not replace it.  */
      if (name_info (data, tgt)->preserve_biv)
	return;

      bsi = gsi_after_labels (gimple_bb (use->stmt));
      break;

    case GIMPLE_ASSIGN:
      tgt = gimple_assign_lhs (use->stmt);
      bsi = gsi_for_stmt (use->stmt);
      break;

    default:
      gcc_unreachable ();
    }

  aff_tree aff_inv, aff_var;
  if (!get_computation_aff_1 (data->current_loop, use->stmt,
			      use, cand, &aff_inv, &aff_var))
    gcc_unreachable ();

  unshare_aff_combination (&aff_inv);
  unshare_aff_combination (&aff_var);
  /* Prefer the CSE opportunity over the loop invariant by adding the offset
     last, so that iv_uses with different offsets can be CSEd.  */
  poly_widest_int offset = aff_inv.offset;
  aff_inv.offset = 0;

  gimple_seq stmt_list = NULL, seq = NULL;
  tree comp_op1 = aff_combination_to_tree (&aff_inv);
  tree comp_op2 = aff_combination_to_tree (&aff_var);
  gcc_assert (comp_op1 && comp_op2);

  comp_op1 = force_gimple_operand (comp_op1, &seq, true, NULL);
  gimple_seq_add_seq (&stmt_list, seq);
  comp_op2 = force_gimple_operand (comp_op2, &seq, true, NULL);
  gimple_seq_add_seq (&stmt_list, seq);

  if (POINTER_TYPE_P (TREE_TYPE (comp_op2)))
    std::swap (comp_op1, comp_op2);

  if (POINTER_TYPE_P (TREE_TYPE (comp_op1)))
    {
      comp = fold_build_pointer_plus (comp_op1,
				      fold_convert (sizetype, comp_op2));
      comp = fold_build_pointer_plus (comp,
				      wide_int_to_tree (sizetype, offset));
    }
  else
    {
      comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp_op1,
			  fold_convert (TREE_TYPE (comp_op1), comp_op2));
      comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp,
			  wide_int_to_tree (TREE_TYPE (comp_op1), offset));
    }

  comp = fold_convert (type, comp);
  comp = force_gimple_operand (comp, &seq, false, NULL);
  gimple_seq_add_seq (&stmt_list, seq);
  if (gimple_code (use->stmt) != GIMPLE_PHI
      /* We can't allow re-allocating the stmt as it might be pointed
	 to still.  */
      && (get_gimple_rhs_num_ops (TREE_CODE (comp))
	  >= gimple_num_ops (gsi_stmt (bsi))))
    {
      comp = force_gimple_operand (comp, &seq, true, NULL);
      gimple_seq_add_seq (&stmt_list, seq);
      if (POINTER_TYPE_P (TREE_TYPE (tgt)))
	{
	  duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
	  /* As this isn't a plain copy we have to reset alignment
	     information.  */
	  if (SSA_NAME_PTR_INFO (comp))
	    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
	}
    }

  gsi_insert_seq_before (&bsi, stmt_list, GSI_SAME_STMT);
  if (gimple_code (use->stmt) == GIMPLE_PHI)
    {
      ass = gimple_build_assign (tgt, comp);
      gsi_insert_before (&bsi, ass, GSI_SAME_STMT);

      bsi = gsi_for_stmt (use->stmt);
      remove_phi_node (&bsi, false);
    }
  else
    {
      gimple_assign_set_rhs_from_tree (&bsi, comp);
      use->stmt = gsi_stmt (bsi);
    }
}

/* Performs a peephole optimization to reorder the iv update statement with
   a mem ref to enable instruction combining in later phases.  The mem ref
   uses the iv value before the update, so the reordering transformation
   requires adjustment of the offset.  CAND is the selected IV_CAND.

   Example:

   t = MEM_REF (base, iv1, 8, 16);  // base, index, stride, offset
   iv2 = iv1 + 1;

   if (t < val)      (1)
     goto L;
   goto Head;

   Directly propagating t over to (1) would introduce an overlapping live
   range and thus increase register pressure.  This peephole transforms it
   into:

   iv2 = iv1 + 1;
   t = MEM_REF (base, iv2, 8, 8);
   if (t < val)
     goto L;
   goto Head;
*/

static void
adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
{
  tree var_after;
  gimple *iv_update, *stmt;
  basic_block bb;
  gimple_stmt_iterator gsi, gsi_iv;

  if (cand->pos != IP_NORMAL)
    return;

  var_after = cand->var_after;
  iv_update = SSA_NAME_DEF_STMT (var_after);

  bb = gimple_bb (iv_update);
  gsi = gsi_last_nondebug_bb (bb);
  stmt = gsi_stmt (gsi);

  /* Only handle conditional statement for now.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return;

  gsi_prev_nondebug (&gsi);
  stmt = gsi_stmt (gsi);
  if (stmt != iv_update)
    return;

  gsi_prev_nondebug (&gsi);
  if (gsi_end_p (gsi))
    return;

  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  if (stmt != use->stmt)
    return;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reordering \n");
      print_gimple_stmt (dump_file, iv_update, 0);
      print_gimple_stmt (dump_file, use->stmt, 0);
      fprintf (dump_file, "\n");
    }

  gsi = gsi_for_stmt (use->stmt);
  gsi_iv = gsi_for_stmt (iv_update);
  gsi_move_before (&gsi_iv, &gsi);

  cand->pos = IP_BEFORE_USE;
  cand->incremented_at = use->stmt;
}
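
/* Illustrative sketch (not part of the pass): the reordering above is only
   sound because the memory reference's constant offset is adjusted by the
   step of the induction variable.  The two hypothetical functions below
   (sketch_before, sketch_after) compute the same value for any P and I,
   mirroring the before/after shapes in the comment; they are plain C, not
   GIMPLE.  */

static inline int
sketch_before (const int *p, int *i)
{
  int t = p[*i + 2];		/* use of the old iv value, offset 2 */
  *i = *i + 1;			/* iv update afterwards */
  return t;
}

static inline int
sketch_after (const int *p, int *i)
{
  *i = *i + 1;			/* iv update moved ahead of the use */
  int t = p[*i + 1];		/* offset reduced by the step: 2 - 1 */
  return t;
}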

/* Return the alias pointer type that should be used for a MEM_REF
   associated with USE, which has type USE_PTR_ADDRESS.  */

static tree
get_alias_ptr_type_for_ptr_address (iv_use *use)
{
  gcall *call = as_a <gcall *> (use->stmt);
  switch (gimple_call_internal_fn (call))
    {
    case IFN_MASK_LOAD:
    case IFN_MASK_STORE:
    case IFN_MASK_LOAD_LANES:
    case IFN_MASK_STORE_LANES:
    case IFN_LEN_LOAD:
    case IFN_LEN_STORE:
      /* The second argument contains the correct alias type.  */
      gcc_assert (use->op_p == gimple_call_arg_ptr (call, 0));
      return TREE_TYPE (gimple_call_arg (call, 1));

    default:
      gcc_unreachable ();
    }
}

/* Rewrites USE (address that is an iv) using candidate CAND.  */

static void
rewrite_use_address (struct ivopts_data *data,
		     struct iv_use *use, struct iv_cand *cand)
{
  aff_tree aff;
  bool ok;

  adjust_iv_update_pos (cand, use);
  ok = get_computation_aff (data->current_loop, use->stmt, use, cand, &aff);
  gcc_assert (ok);
  unshare_aff_combination (&aff);

  /* To avoid undefined overflow problems, all IV candidates use unsigned
     integer types.  The drawback is that this makes it impossible for
     create_mem_ref to distinguish an IV that is based on a memory object
     from one that represents simply an offset.

     To work around this problem, we pass a hint to create_mem_ref that
     indicates which variable (if any) in aff is an IV based on a memory
     object.  Note that we only consider the candidate.  If this is not
     based on an object, the base of the reference is in some subexpression
     of the use -- but these will use pointer types, so they are recognized
     by the create_mem_ref heuristics anyway.  */
  tree iv = var_at_stmt (data->current_loop, cand, use->stmt);
  tree base_hint = (cand->iv->base_object) ? iv : NULL_TREE;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  tree type = use->mem_type;
  tree alias_ptr_type;
  if (use->type == USE_PTR_ADDRESS)
    alias_ptr_type = get_alias_ptr_type_for_ptr_address (use);
  else
    {
      gcc_assert (type == TREE_TYPE (*use->op_p));
      unsigned int align = get_object_alignment (*use->op_p);
      if (align != TYPE_ALIGN (type))
	type = build_aligned_type (type, align);
      alias_ptr_type = reference_alias_ptr_type (*use->op_p);
    }
  tree ref = create_mem_ref (&bsi, type, &aff, alias_ptr_type,
			     iv, base_hint, data->speed);

  if (use->type == USE_PTR_ADDRESS)
    {
      ref = fold_build1 (ADDR_EXPR, build_pointer_type (use->mem_type), ref);
      ref = fold_convert (get_use_type (use), ref);
      ref = force_gimple_operand_gsi (&bsi, ref, true, NULL_TREE,
				      true, GSI_SAME_STMT);
    }
  else
    copy_ref_info (ref, *use->op_p);

  *use->op_p = ref;
}

/* Rewrites USE (the condition such that one of the arguments is an iv) using
   candidate CAND.  */

static void
rewrite_use_compare (struct ivopts_data *data,
		     struct iv_use *use, struct iv_cand *cand)
{
  tree comp, op, bound;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  enum tree_code compare;
  struct iv_group *group = data->vgroups[use->group_id];
  class cost_pair *cp = get_group_iv_cost (data, group, cand);

  bound = cp->value;
  if (bound)
    {
      tree var = var_at_stmt (data->current_loop, cand, use->stmt);
      tree var_type = TREE_TYPE (var);
      gimple_seq stmts;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Replacing exit test: ");
	  print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
	}
      compare = cp->comp;
      bound = unshare_expr (fold_convert (var_type, bound));
      op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (
		loop_preheader_edge (data->current_loop),
		stmts);

      gcond *cond_stmt = as_a <gcond *> (use->stmt);
      gimple_cond_set_lhs (cond_stmt, var);
      gimple_cond_set_code (cond_stmt, compare);
      gimple_cond_set_rhs (cond_stmt, op);
      return;
    }

  /* The induction variable elimination failed; just express the original
     giv.  */
  comp = get_computation_at (data->current_loop, use->stmt, use, cand);
  gcc_assert (comp != NULL_TREE);
  gcc_assert (use->op_p != NULL);
  *use->op_p = force_gimple_operand_gsi (&bsi, comp, true,
					 SSA_NAME_VAR (*use->op_p),
					 true, GSI_SAME_STMT);
}

/* Rewrite the groups using the selected induction variables.  */

static void
rewrite_groups (struct ivopts_data *data)
{
  unsigned i, j;

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];
      struct iv_cand *cand = group->selected;

      gcc_assert (cand);

      if (group->type == USE_NONLINEAR_EXPR)
	{
	  for (j = 0; j < group->vuses.length (); j++)
	    {
	      rewrite_use_nonlinear_expr (data, group->vuses[j], cand);
	      update_stmt (group->vuses[j]->stmt);
	    }
	}
      else if (address_p (group->type))
	{
	  for (j = 0; j < group->vuses.length (); j++)
	    {
	      rewrite_use_address (data, group->vuses[j], cand);
	      update_stmt (group->vuses[j]->stmt);
	    }
	}
      else
	{
	  gcc_assert (group->type == USE_COMPARE);

	  for (j = 0; j < group->vuses.length (); j++)
	    {
	      rewrite_use_compare (data, group->vuses[j], cand);
	      update_stmt (group->vuses[j]->stmt);
	    }
	}
    }
}

/* Removes the ivs that are not used after rewriting.  */

static void
remove_unused_ivs (struct ivopts_data *data, bitmap toremove)
{
  unsigned j;
  bitmap_iterator bi;

  /* Figure out an order in which to release SSA DEFs so that we don't
     release something that we'd have to propagate into a debug stmt
     afterwards.  */
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info;

      info = ver_info (data, j);
      if (info->iv
	  && !integer_zerop (info->iv->step)
	  && !info->inv_id
	  && !info->iv->nonlin_use
	  && !info->preserve_biv)
	{
	  bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));

	  tree def = info->iv->ssa_name;

	  if (MAY_HAVE_DEBUG_BIND_STMTS && SSA_NAME_DEF_STMT (def))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;
	      gimple *stmt;
	      int count = 0;

	      FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
		{
		  if (!gimple_debug_bind_p (stmt))
		    continue;

		  /* We just want to determine whether to do nothing
		     (count == 0), to substitute the computed
		     expression into a single use of the SSA DEF by
		     itself (count == 1), or to use a debug temp
		     because the SSA DEF is used multiple times or as
		     part of a larger expression (count > 1).  */
		  count++;
		  if (gimple_debug_bind_get_value (stmt) != def)
		    count++;

		  if (count > 1)
		    break;
		}

	      if (!count)
		continue;

	      struct iv_use dummy_use;
	      struct iv_cand *best_cand = NULL, *cand;
	      unsigned i, best_pref = 0, cand_pref;
	      tree comp = NULL_TREE;

	      memset (&dummy_use, 0, sizeof (dummy_use));
	      dummy_use.iv = info->iv;
	      for (i = 0; i < data->vgroups.length () && i < 64; i++)
		{
		  cand = data->vgroups[i]->selected;
		  if (cand == best_cand)
		    continue;
		  cand_pref = operand_equal_p (cand->iv->step,
					       info->iv->step, 0)
			      ? 4 : 0;
		  cand_pref
		    += TYPE_MODE (TREE_TYPE (cand->iv->base))
		       == TYPE_MODE (TREE_TYPE (info->iv->base))
		       ? 2 : 0;
		  cand_pref
		    += TREE_CODE (cand->iv->base) == INTEGER_CST
		       ? 1 : 0;
		  if (best_cand == NULL || best_pref < cand_pref)
		    {
		      tree this_comp
			= get_debug_computation_at (data->current_loop,
						    SSA_NAME_DEF_STMT (def),
						    &dummy_use, cand);
		      if (this_comp)
			{
			  best_cand = cand;
			  best_pref = cand_pref;
			  comp = this_comp;
			}
		    }
		}

	      if (!best_cand)
		continue;

	      comp = unshare_expr (comp);
	      if (count > 1)
		{
		  tree vexpr = make_node (DEBUG_EXPR_DECL);
		  DECL_ARTIFICIAL (vexpr) = 1;
		  TREE_TYPE (vexpr) = TREE_TYPE (comp);
		  if (SSA_NAME_VAR (def))
		    SET_DECL_MODE (vexpr, DECL_MODE (SSA_NAME_VAR (def)));
		  else
		    SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (vexpr)));
		  gdebug *def_temp
		    = gimple_build_debug_bind (vexpr, comp, NULL);
		  gimple_stmt_iterator gsi;

		  if (gimple_code (SSA_NAME_DEF_STMT (def)) == GIMPLE_PHI)
		    gsi = gsi_after_labels (gimple_bb
					    (SSA_NAME_DEF_STMT (def)));
		  else
		    gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (def));

		  gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
		  comp = vexpr;
		}

	      FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
		{
		  if (!gimple_debug_bind_p (stmt))
		    continue;

		  FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
		    SET_USE (use_p, comp);

		  update_stmt (stmt);
		}
	    }
	}
    }
}
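
/* Illustrative sketch (not part of the pass): the debug handling above boils
   down to a three-way decision based on how many debug binds reference the
   dying SSA name and whether it appears as a subexpression.  The enum and
   function below are hypothetical and only mirror that decision; they do
   not touch GIMPLE.  */

enum sketch_debug_action
{
  SKETCH_NOTHING,	/* no debug uses: just drop the iv */
  SKETCH_SUBSTITUTE,	/* one direct use: substitute the expression */
  SKETCH_USE_TEMP	/* several or nested uses: bind one debug temp */
};

static inline enum sketch_debug_action
sketch_debug_decision (int n_debug_uses, int used_as_subexpression)
{
  int count = n_debug_uses + (used_as_subexpression ? 1 : 0);
  if (count == 0)
    return SKETCH_NOTHING;
  if (count == 1)
    return SKETCH_SUBSTITUTE;
  return SKETCH_USE_TEMP;
}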

/* Frees memory occupied by class tree_niter_desc in *VALUE.  Callback
   for hash_map::traverse.  */

bool
free_tree_niter_desc (edge const &, tree_niter_desc *const &value, void *)
{
  free (value);
  return true;
}

/* Frees data allocated by the optimization of a single loop.  */

static void
free_loop_data (struct ivopts_data *data)
{
  unsigned i, j;
  bitmap_iterator bi;
  tree obj;

  if (data->niters)
    {
      data->niters->traverse <void *, free_tree_niter_desc> (NULL);
      delete data->niters;
      data->niters = NULL;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      struct version_info *info;

      info = ver_info (data, i);
      info->iv = NULL;
      info->has_nonlin_use = false;
      info->preserve_biv = false;
      info->inv_id = 0;
    }
  bitmap_clear (data->relevant);
  bitmap_clear (data->important_candidates);

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];

      for (j = 0; j < group->vuses.length (); j++)
	free (group->vuses[j]);
      group->vuses.release ();

      BITMAP_FREE (group->related_cands);
      for (j = 0; j < group->n_map_members; j++)
	{
	  if (group->cost_map[j].inv_vars)
	    BITMAP_FREE (group->cost_map[j].inv_vars);
	  if (group->cost_map[j].inv_exprs)
	    BITMAP_FREE (group->cost_map[j].inv_exprs);
	}

      free (group->cost_map);
      free (group);
    }
  data->vgroups.truncate (0);

  for (i = 0; i < data->vcands.length (); i++)
    {
      struct iv_cand *cand = data->vcands[i];

      if (cand->inv_vars)
	BITMAP_FREE (cand->inv_vars);
      if (cand->inv_exprs)
	BITMAP_FREE (cand->inv_exprs);
      free (cand);
    }
  data->vcands.truncate (0);

  if (data->version_info_size < num_ssa_names)
    {
      data->version_info_size = 2 * num_ssa_names;
      free (data->version_info);
      data->version_info = XCNEWVEC (struct version_info,
				     data->version_info_size);
    }

  data->max_inv_var_id = 0;
  data->max_inv_expr_id = 0;

  FOR_EACH_VEC_ELT (decl_rtl_to_reset, i, obj)
    SET_DECL_RTL (obj, NULL_RTX);

  decl_rtl_to_reset.truncate (0);

  data->inv_expr_tab->empty ();

  data->iv_common_cand_tab->empty ();
  data->iv_common_cands.truncate (0);
}

/* Finalizes data structures used by the iv optimization pass.  */

static void
tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
{
  free_loop_data (data);
  free (data->version_info);
  BITMAP_FREE (data->relevant);
  BITMAP_FREE (data->important_candidates);

  decl_rtl_to_reset.release ();
  data->vgroups.release ();
  data->vcands.release ();
  delete data->inv_expr_tab;
  data->inv_expr_tab = NULL;
  free_affine_expand_cache (&data->name_expansion_cache);
  if (data->base_object_map)
    delete data->base_object_map;
  delete data->iv_common_cand_tab;
  data->iv_common_cand_tab = NULL;
  data->iv_common_cands.release ();
  obstack_free (&data->iv_obstack, NULL);
}

/* Returns true if the loop body BODY includes any function calls.  */

static bool
loop_body_includes_call (basic_block *body, unsigned num_nodes)
{
  gimple_stmt_iterator gsi;
  unsigned i;

  for (i = 0; i < num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (is_gimple_call (stmt)
	    && !gimple_call_internal_p (stmt)
	    && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
	  return true;
      }
  return false;
}

/* Determine cost scaling factor for basic blocks in loop.  */
#define COST_SCALING_FACTOR_BOUND (20)

static void
determine_scaling_factor (struct ivopts_data *data, basic_block *body)
{
  int lfreq = data->current_loop->header->count.to_frequency (cfun);
  if (!data->speed || lfreq <= 0)
    return;

  int max_freq = lfreq;
  for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
    {
      body[i]->aux = (void *)(intptr_t) 1;
      if (max_freq < body[i]->count.to_frequency (cfun))
	max_freq = body[i]->count.to_frequency (cfun);
    }
  if (max_freq > lfreq)
    {
      int divisor, factor;
      /* Check if scaling factor itself needs to be scaled by the bound.  This
	 is to avoid overflow when scaling cost according to profile info.  */
      if (max_freq / lfreq > COST_SCALING_FACTOR_BOUND)
	{
	  divisor = max_freq;
	  factor = COST_SCALING_FACTOR_BOUND;
	}
      else
	{
	  divisor = lfreq;
	  factor = 1;
	}
      for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
	{
	  int bfreq = body[i]->count.to_frequency (cfun);
	  if (bfreq <= lfreq)
	    continue;

	  body[i]->aux = (void *)(intptr_t) (factor * bfreq / divisor);
	}
    }
}
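
/* Illustrative sketch (not part of the pass): the per-block weight computed
   above is FACTOR * BFREQ / DIVISOR, where the pair is (1, loop header
   frequency) in the common case, or (COST_SCALING_FACTOR_BOUND, maximum
   block frequency) when the hottest block is more than the bound times
   hotter than the header, so the weight can never exceed the bound.  The
   function below is a hypothetical standalone restatement of that
   arithmetic; LFREQ is assumed positive, as in the caller above.  */

static inline int
sketch_block_weight (int bfreq, int lfreq, int max_freq, int bound)
{
  int divisor, factor;

  if (max_freq / lfreq > bound)
    {
      factor = bound;
      divisor = max_freq;	/* Cap the weight at BOUND.  */
    }
  else
    {
      factor = 1;
      divisor = lfreq;		/* Plain ratio to the header frequency.  */
    }

  if (bfreq <= lfreq)
    return 1;			/* Blocks no hotter than the header keep weight 1.  */
  return factor * bfreq / divisor;
}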

/* Find the doloop comparison use and set its doloop_p flag if found.  */

static bool
find_doloop_use (struct ivopts_data *data)
{
  struct loop *loop = data->current_loop;

  for (unsigned i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];
      if (group->type == USE_COMPARE)
	{
	  gcc_assert (group->vuses.length () == 1);
	  struct iv_use *use = group->vuses[0];
	  gimple *stmt = use->stmt;
	  if (gimple_code (stmt) == GIMPLE_COND)
	    {
	      basic_block bb = gimple_bb (stmt);
	      edge true_edge, false_edge;
	      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
	      /* This comparison is used for the loop latch.  Require the
		 latch to be empty for now.  */
	      if ((loop->latch == true_edge->dest
		   || loop->latch == false_edge->dest)
		  && empty_block_p (loop->latch))
		{
		  group->doloop_p = true;
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    {
		      fprintf (dump_file, "Doloop cmp iv use: ");
		      print_gimple_stmt (dump_file, stmt, TDF_DETAILS);
		    }
		  return true;
		}
	    }
	}
    }

  return false;
}

/* For targets that support doloop, predict whether the later RTL doloop
   transformation will be performed on this loop; if so, detect the doloop
   use and set the flag doloop_use_p.  */

static void
analyze_and_mark_doloop_use (struct ivopts_data *data)
{
  data->doloop_use_p = false;

  if (!flag_branch_on_count_reg)
    return;

  if (data->current_loop->unroll == USHRT_MAX)
    return;

  if (!generic_predict_doloop_p (data))
    return;

  if (find_doloop_use (data))
    {
      data->doloop_use_p = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  struct loop *loop = data->current_loop;
	  fprintf (dump_file,
		   "Predict loop %d can perform"
		   " doloop optimization later.\n",
		   loop->num);
	  flow_loop_dump (loop, dump_file, NULL, 1);
	}
    }
}

/* Optimizes the LOOP.  Returns true if anything changed.  */

static bool
tree_ssa_iv_optimize_loop (struct ivopts_data *data, class loop *loop,
			   bitmap toremove)
{
  bool changed = false;
  class iv_ca *iv_ca;
  edge exit = single_dom_exit (loop);
  basic_block *body;

  gcc_assert (!data->niters);
  data->current_loop = loop;
  data->loop_loc = find_loop_location (loop).get_location_t ();
  data->speed = optimize_loop_for_speed_p (loop);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Processing loop %d", loop->num);
      if (data->loop_loc != UNKNOWN_LOCATION)
	fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
		 LOCATION_LINE (data->loop_loc));
      fprintf (dump_file, "\n");

      if (exit)
	{
	  fprintf (dump_file, "  single exit %d -> %d, exit condition ",
		   exit->src->index, exit->dest->index);
	  print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}

      fprintf (dump_file, "\n");
    }

  body = get_loop_body (loop);
  data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
  renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);

  data->loop_single_exit_p
    = exit != NULL && loop_only_exit_p (loop, body, exit);

  /* For each ssa name, determine whether it behaves as an induction variable
     in some loop.  */
  if (!find_induction_variables (data))
    goto finish;

  /* Finds interesting uses (item 1).  */
  find_interesting_uses (data);
  if (data->vgroups.length () > MAX_CONSIDERED_GROUPS)
    goto finish;

  /* Determine cost scaling factor for basic blocks in loop.  */
  determine_scaling_factor (data, body);

  /* Analyze doloop possibility and mark the doloop use if predicted.  */
  analyze_and_mark_doloop_use (data);

  /* Finds candidates for the induction variables (item 2).  */
  find_iv_candidates (data);

  /* Calculates the costs (item 3, part 1).  */
  determine_iv_costs (data);
  determine_group_iv_costs (data);
  determine_set_costs (data);

  /* Find the optimal set of induction variables (item 3, part 2).  */
  iv_ca = find_optimal_iv_set (data);
  /* Cleanup basic block aux field.  */
  for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
    body[i]->aux = NULL;
  if (!iv_ca)
    goto finish;
  changed = true;

  /* Create the new induction variables (item 4, part 1).  */
  create_new_ivs (data, iv_ca);
  iv_ca_free (&iv_ca);

  /* Rewrite the uses (item 4, part 2).  */
  rewrite_groups (data);

  /* Remove the ivs that are unused after rewriting.  */
  remove_unused_ivs (data, toremove);

finish:
  free (body);
  free_loop_data (data);

  return changed;
}

/* Main entry point.  Optimizes induction variables in loops.  */

void
tree_ssa_iv_optimize (void)
{
  struct ivopts_data data;
  auto_bitmap toremove;

  tree_ssa_iv_optimize_init (&data);

  /* Optimize the loops starting with the innermost ones.  */
  for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
    {
      if (!dbg_cnt (ivopts_loop))
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	flow_loop_dump (loop, dump_file, NULL, 1);

      tree_ssa_iv_optimize_loop (&data, loop, toremove);
    }

  /* Remove eliminated IV defs.  */
  release_defs_bitset (toremove);

  /* We have changed the structure of induction variables; it might happen
     that definitions in the scev database refer to some of them that were
     eliminated.  */
  scev_reset_htab ();
  /* Likewise niter and control-IV information.  */
  free_numbers_of_iterations_estimates (cfun);

  tree_ssa_iv_optimize_finalize (&data);
}

#include "gt-tree-ssa-loop-ivopts.h"