/* Induction variable optimizations.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass tries to find the optimal set of induction variables for the loop.
   It optimizes just the basic linear induction variables (although adding
   support for other types should not be too hard).  It includes the
   optimizations commonly known as strength reduction, induction variable
   coalescing and induction variable elimination.  It does it in the
   following steps:

   1) The interesting uses of induction variables are found.  This includes

      -- uses of induction variables in non-linear expressions
      -- addresses of arrays
      -- comparisons of induction variables

      Note that interesting uses are categorized and handled in groups.
      Generally, address type uses are grouped together if their iv bases
      differ only in constant offset.

   2) Candidates for the induction variables are found.  This includes

      -- old induction variables
      -- the variables defined by expressions derived from the "interesting
	 groups/uses" above

   3) The optimal (w.r. to a cost function) set of variables is chosen.  The
      cost function assigns a cost to sets of induction variables and consists
      of three parts:

      -- The group/use costs.  Each of the interesting groups/uses chooses
	 the best induction variable in the set and adds its cost to the sum.
	 The cost reflects the time spent on modifying the induction variables
	 value to be usable for the given purpose (adding base and offset for
	 arrays, etc.).
      -- The variable costs.  Each of the variables has a cost assigned that
	 reflects the costs associated with incrementing the value of the
	 variable.  The original variables are somewhat preferred.
      -- The set cost.  Depending on the size of the set, extra cost may be
	 added to reflect register pressure.

      All the costs are defined in a machine-specific way, using the target
      hooks and machine descriptions to determine them.

   4) The trees are transformed to use the new variables, the dead code is
      removed.

   All of this is done loop by loop.  Doing it globally is theoretically
   possible, it might give a better performance and it might enable us
   to decide costs more precisely, but getting all the interactions right
   would be complicated.  */

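/* As a (simplified) illustration of the above, not taken from the
   sources: in a loop like

     for (i = 0; i < n; i++)
       a[i] = 0;

   the address &a[i] is an interesting use and i is a candidate (an old
   induction variable); strength reduction may instead select a pointer
   candidate starting at &a[0] and stepped by sizeof (*a), turning the
   store into *p = 0 with p advanced each iteration.  */
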
#include "coretypes.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-address.h"
#include "builtins.h"
#include "tree-vectorizer.h"

/* FIXME: Expressions are expanded to RTL in this pass to determine the
   cost of different addressing modes.  This should be moved to a TBD
   interface between the GIMPLE and RTL worlds.  */

/* The infinite cost.  */
#define INFTY 10000000

/* Returns the expected number of loop iterations for LOOP.
   The average trip count is computed from profile data if it
   exists.  */

static inline HOST_WIDE_INT
avg_loop_niter (struct loop *loop)
{
  HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
  if (niter == -1)
    {
      niter = likely_max_stmt_executions_int (loop);

      if (niter == -1 || niter > PARAM_VALUE (PARAM_AVG_LOOP_NITER))
	return PARAM_VALUE (PARAM_AVG_LOOP_NITER);
    }

  return niter;
}

/* Representation of the induction variable.  */
struct iv
{
  tree base;		/* Initial value of the iv.  */
  tree base_object;	/* A memory object to which the induction variable points.  */
  tree step;		/* Step of the iv (constant only).  */
  tree ssa_name;	/* The ssa name with the value.  */
  struct iv_use *nonlin_use;	/* The identifier in the use if it is the case.  */
  bool biv_p;		/* Is it a biv?  */
  bool no_overflow;	/* True if the iv doesn't overflow.  */
  bool have_address_use;/* For biv, indicate if it's used in any address
			   type use.  */
};

/* Per-ssa version information (induction variable descriptions, etc.).  */
struct version_info
{
  tree name;		/* The ssa name.  */
  struct iv *iv;	/* Induction variable description.  */
  bool has_nonlin_use;	/* For a loop-level invariant, whether it is used in
			   an expression that is not an induction variable.  */
  bool preserve_biv;	/* For the original biv, whether to preserve it.  */
  unsigned inv_id;	/* Id of an invariant.  */
};

/* Types of uses.  */
enum use_type
{
  USE_NONLINEAR_EXPR,	/* Use in a nonlinear expression.  */
  USE_ADDRESS,		/* Use in an address.  */
  USE_COMPARE		/* Use is a compare.  */
};

/* Cost of a computation.  */
struct comp_cost
{
  comp_cost (): cost (0), complexity (0), scratch (0)
  {}

  comp_cost (int cost, unsigned complexity, int scratch = 0)
    : cost (cost), complexity (complexity), scratch (scratch)
  {}

  /* Returns true if COST is infinite.  */
  bool infinite_cost_p ();

  /* Adds costs COST1 and COST2.  */
  friend comp_cost operator+ (comp_cost cost1, comp_cost cost2);

  /* Adds COST to the comp_cost.  */
  comp_cost operator+= (comp_cost cost);

  /* Adds constant C to this comp_cost.  */
  comp_cost operator+= (HOST_WIDE_INT c);

  /* Subtracts constant C from this comp_cost.  */
  comp_cost operator-= (HOST_WIDE_INT c);

  /* Divide the comp_cost by constant C.  */
  comp_cost operator/= (HOST_WIDE_INT c);

  /* Multiply the comp_cost by constant C.  */
  comp_cost operator*= (HOST_WIDE_INT c);

  /* Subtracts COST2 from COST1.  */
  friend comp_cost operator- (comp_cost cost1, comp_cost cost2);

  /* Subtracts COST from this comp_cost.  */
  comp_cost operator-= (comp_cost cost);

  /* Returns true if COST1 is smaller than COST2.  */
  friend bool operator< (comp_cost cost1, comp_cost cost2);

  /* Returns true if COST1 and COST2 are equal.  */
  friend bool operator== (comp_cost cost1, comp_cost cost2);

  /* Returns true if COST1 is smaller than or equal to COST2.  */
  friend bool operator<= (comp_cost cost1, comp_cost cost2);

  int cost;		/* The runtime cost.  */
  unsigned complexity;	/* The estimate of the complexity of the code for
			   the computation (in no concrete units --
			   complexity field should be larger for more
			   complex expressions and addressing modes).  */
  int scratch;		/* Scratch used during cost computation.  */
};

static const comp_cost no_cost;
static const comp_cost infinite_cost (INFTY, INFTY, INFTY);

bool
comp_cost::infinite_cost_p ()
{
  return cost == INFTY;
}

comp_cost
operator+ (comp_cost cost1, comp_cost cost2)
{
  if (cost1.infinite_cost_p () || cost2.infinite_cost_p ())
    return infinite_cost;

  cost1.cost += cost2.cost;
  cost1.complexity += cost2.complexity;

  return cost1;
}

comp_cost
operator- (comp_cost cost1, comp_cost cost2)
{
  if (cost1.infinite_cost_p ())
    return infinite_cost;

  gcc_assert (!cost2.infinite_cost_p ());

  cost1.cost -= cost2.cost;
  cost1.complexity -= cost2.complexity;

  return cost1;
}

comp_cost
comp_cost::operator+= (comp_cost cost)
{
  *this = *this + cost;
  return *this;
}

comp_cost
comp_cost::operator+= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  this->cost += c;

  return *this;
}

comp_cost
comp_cost::operator-= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  this->cost -= c;

  return *this;
}

comp_cost
comp_cost::operator/= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  this->cost /= c;

  return *this;
}

comp_cost
comp_cost::operator*= (HOST_WIDE_INT c)
{
  if (infinite_cost_p ())
    return *this;

  this->cost *= c;

  return *this;
}

comp_cost
comp_cost::operator-= (comp_cost cost)
{
  *this = *this - cost;
  return *this;
}

bool
operator< (comp_cost cost1, comp_cost cost2)
{
  if (cost1.cost == cost2.cost)
    return cost1.complexity < cost2.complexity;

  return cost1.cost < cost2.cost;
}

bool
operator== (comp_cost cost1, comp_cost cost2)
{
  return cost1.cost == cost2.cost
    && cost1.complexity == cost2.complexity;
}

bool
operator<= (comp_cost cost1, comp_cost cost2)
{
  return cost1 < cost2 || cost1 == cost2;
}

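/* Note the ordering the operators above imply: e.g. {cost = 4,
   complexity = 1} compares smaller than {cost = 4, complexity = 2};
   the runtime cost is the primary key and complexity only breaks
   ties.  */
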
struct iv_inv_expr_ent;

/* The candidate - cost pair.  */
struct cost_pair
{
  struct iv_cand *cand;	/* The candidate.  */
  comp_cost cost;	/* The cost.  */
  enum tree_code comp;	/* For iv elimination, the comparison.  */
  bitmap inv_vars;	/* The list of invariant ssa_vars that have to be
			   preserved when representing iv_use with iv_cand.  */
  bitmap inv_exprs;	/* The list of newly created invariant expressions
			   when representing iv_use with iv_cand.  */
  tree value;		/* For final value elimination, the expression for
			   the final value of the iv.  For iv elimination,
			   the new bound to compare with.  */
};

/* Use.  */
struct iv_use
{
  unsigned id;		/* The id of the use.  */
  unsigned group_id;	/* The group id the use belongs to.  */
  enum use_type type;	/* Type of the use.  */
  struct iv *iv;	/* The induction variable it is based on.  */
  gimple *stmt;		/* Statement in which it occurs.  */
  tree *op_p;		/* The place where it occurs.  */

  tree addr_base;	/* Base address with const offset stripped.  */
  unsigned HOST_WIDE_INT addr_offset;
			/* Const offset stripped from base address.  */
};

/* Group of uses.  */
struct iv_group
{
  /* The id of the group.  */
  unsigned id;
  /* Uses of the group are of the same type.  */
  enum use_type type;
  /* The set of "related" IV candidates, plus the important ones.  */
  bitmap related_cands;
  /* Number of IV candidates in the cost_map.  */
  unsigned n_map_members;
  /* The costs w.r.t. the iv candidates.  */
  struct cost_pair *cost_map;
  /* The selected candidate for the group.  */
  struct iv_cand *selected;
  /* Uses in the group.  */
  vec<struct iv_use *> vuses;
};

/* The position where the iv is computed.  */
enum iv_position
{
  IP_NORMAL,		/* At the end, just before the exit condition.  */
  IP_END,		/* At the end of the latch block.  */
  IP_BEFORE_USE,	/* Immediately before a specific use.  */
  IP_AFTER_USE,		/* Immediately after a specific use.  */
  IP_ORIGINAL		/* The original biv.  */
};

/* The induction variable candidate.  */
struct iv_cand
{
  unsigned id;		/* The number of the candidate.  */
  bool important;	/* Whether this is an "important" candidate, i.e. such
			   that it should be considered by all uses.  */
  ENUM_BITFIELD(iv_position) pos : 8;	/* Where it is computed.  */
  gimple *incremented_at;/* For original biv, the statement where it is
			    incremented.  */
  tree var_before;	/* The variable used for it before increment.  */
  tree var_after;	/* The variable used for it after increment.  */
  struct iv *iv;	/* The value of the candidate.  NULL for
			   "pseudocandidate" used to indicate the possibility
			   to replace the final value of an iv by direct
			   computation of the value.  */
  unsigned cost;	/* Cost of the candidate.  */
  unsigned cost_step;	/* Cost of the candidate's increment operation.  */
  struct iv_use *ainc_use;/* For IP_{BEFORE,AFTER}_USE candidates, the place
			     where it is incremented.  */
  bitmap inv_vars;	/* The list of invariant ssa_vars used in step of the
			   biv.  */
  bitmap inv_exprs;	/* If step is more complicated than a single ssa_var,
			   handle it as a new invariant expression which will
			   be hoisted out of loop.  */
  struct iv *orig_iv;	/* The original iv if this cand is added from biv with
			   smaller type.  */
};

/* Hashtable entry for common candidate derived from iv uses.  */
struct iv_common_cand
{
  tree base;
  tree step;
  /* IV uses from which this common candidate is derived.  */
  auto_vec<struct iv_use *> uses;
  hashval_t hash;
};

/* Hashtable helpers.  */

struct iv_common_cand_hasher : delete_ptr_hash <iv_common_cand>
{
  static inline hashval_t hash (const iv_common_cand *);
  static inline bool equal (const iv_common_cand *, const iv_common_cand *);
};

/* Hash function for possible common candidates.  */

inline hashval_t
iv_common_cand_hasher::hash (const iv_common_cand *ccand)
{
  return ccand->hash;
}

/* Hash table equality function for common candidates.  */

inline bool
iv_common_cand_hasher::equal (const iv_common_cand *ccand1,
			      const iv_common_cand *ccand2)
{
  return (ccand1->hash == ccand2->hash
	  && operand_equal_p (ccand1->base, ccand2->base, 0)
	  && operand_equal_p (ccand1->step, ccand2->step, 0)
	  && (TYPE_PRECISION (TREE_TYPE (ccand1->base))
	      == TYPE_PRECISION (TREE_TYPE (ccand2->base))));
}

/* Loop invariant expression hashtable entry.  */

struct iv_inv_expr_ent
{
  /* Tree expression of the entry.  */
  tree expr;
  /* Unique identifier.  */
  int id;
  /* Hash value.  */
  hashval_t hash;
};

/* Sort iv_inv_expr_ent pair A and B by id field.  */

static int
sort_iv_inv_expr_ent (const void *a, const void *b)
{
  const iv_inv_expr_ent * const *e1 = (const iv_inv_expr_ent * const *) (a);
  const iv_inv_expr_ent * const *e2 = (const iv_inv_expr_ent * const *) (b);

  unsigned id1 = (*e1)->id;
  unsigned id2 = (*e2)->id;

  if (id1 < id2)
    return -1;
  else if (id1 > id2)
    return 1;
  else
    return 0;
}

/* Hashtable helpers.  */

struct iv_inv_expr_hasher : free_ptr_hash <iv_inv_expr_ent>
{
  static inline hashval_t hash (const iv_inv_expr_ent *);
  static inline bool equal (const iv_inv_expr_ent *, const iv_inv_expr_ent *);
};

/* Hash function for loop invariant expressions.  */

inline hashval_t
iv_inv_expr_hasher::hash (const iv_inv_expr_ent *expr)
{
  return expr->hash;
}

/* Hash table equality function for expressions.  */

inline bool
iv_inv_expr_hasher::equal (const iv_inv_expr_ent *expr1,
			   const iv_inv_expr_ent *expr2)
{
  return expr1->hash == expr2->hash
	 && operand_equal_p (expr1->expr, expr2->expr, 0);
}

struct ivopts_data
{
  /* The currently optimized loop.  */
  struct loop *current_loop;
  source_location loop_loc;

  /* Numbers of iterations for all exits of the current loop.  */
  hash_map<edge, tree_niter_desc *> *niters;

  /* Number of registers used in it.  */
  unsigned regs_used;

  /* The size of version_info array allocated.  */
  unsigned version_info_size;

  /* The array of information for the ssa names.  */
  struct version_info *version_info;

  /* The hashtable of loop invariant expressions created
     by ivopt.  */
  hash_table<iv_inv_expr_hasher> *inv_expr_tab;

  /* The bitmap of indices in version_info whose value was changed.  */
  bitmap relevant;

  /* The uses of induction variables.  */
  vec<iv_group *> vgroups;

  /* The candidates.  */
  vec<iv_cand *> vcands;

  /* A bitmap of important candidates.  */
  bitmap important_candidates;

  /* Cache used by tree_to_aff_combination_expand.  */
  hash_map<tree, name_expansion *> *name_expansion_cache;

  /* The hashtable of common candidates derived from iv uses.  */
  hash_table<iv_common_cand_hasher> *iv_common_cand_tab;

  /* The common candidates.  */
  vec<iv_common_cand *> iv_common_cands;

  /* The maximum invariant variable id.  */
  unsigned max_inv_var_id;

  /* The maximum invariant expression id.  */
  unsigned max_inv_expr_id;

  /* Number of no_overflow BIVs which are not used in memory address.  */
  unsigned bivs_not_used_in_addr;

  /* Obstack for iv structure.  */
  struct obstack iv_obstack;

  /* Whether to consider just related and important candidates when replacing a
     use.  */
  bool consider_all_candidates;

  /* Are we optimizing for speed?  */
  bool speed;

  /* Whether the loop body includes any function calls.  */
  bool body_includes_call;

  /* Whether the loop body can only be exited via single exit.  */
  bool loop_single_exit_p;
};

/* An assignment of iv candidates to uses.  */

struct iv_ca
{
  /* The number of uses covered by the assignment.  */
  unsigned upto;

  /* Number of uses that cannot be expressed by the candidates in the set.  */
  unsigned bad_groups;

  /* Candidate assigned to a use, together with the related costs.  */
  struct cost_pair **cand_for_group;

  /* Number of times each candidate is used.  */
  unsigned *n_cand_uses;

  /* The candidates used.  */
  bitmap cands;

  /* The number of candidates in the set.  */
  unsigned n_cands;

  /* The number of invariants needed, including both invariant variants and
     invariant expressions.  */
  unsigned n_invs;

  /* Total cost of expressing uses.  */
  comp_cost cand_use_cost;

  /* Total cost of candidates.  */
  unsigned cand_cost;

  /* Number of times each invariant variable is used.  */
  unsigned *n_inv_var_uses;

  /* Number of times each invariant expression is used.  */
  unsigned *n_inv_expr_uses;

  /* Total cost of the assignment.  */
  comp_cost cost;
};

/* Difference of two iv candidate assignments.  */

struct iv_ca_delta
{
  /* Changed group.  */
  struct iv_group *group;

  /* An old assignment (for rollback purposes).  */
  struct cost_pair *old_cp;

  /* A new assignment.  */
  struct cost_pair *new_cp;

  /* Next change in the list.  */
  struct iv_ca_delta *next;
};

/* Bound on the number of candidates below which all candidates are
   considered.  */

#define CONSIDER_ALL_CANDIDATES_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))

/* If there are more iv occurrences, we just give up (it is quite unlikely that
   optimizing such a loop would help, and it would take ages).  */

#define MAX_CONSIDERED_GROUPS \
  ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))

/* If there are at most this number of ivs in the set, try removing unnecessary
   ivs from the set always.  */

#define ALWAYS_PRUNE_CAND_SET_BOUND \
  ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))

/* The list of trees for which the decl_rtl field must be reset is stored
   here.  */

static vec<tree> decl_rtl_to_reset;

static comp_cost force_expr_to_var_cost (tree, bool);

/* The single loop exit if it dominates the latch, NULL otherwise.  */

edge
single_dom_exit (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return NULL;

  if (!just_once_each_iteration_p (loop, exit->src))
    return NULL;

  return exit;
}

/* Dumps information about the induction variable IV to FILE.  Don't dump
   variable's name if DUMP_NAME is FALSE.  The information is dumped with
   preceding spaces indicated by INDENT_LEVEL.  */

void
dump_iv (FILE *file, struct iv *iv, bool dump_name, unsigned indent_level)
{
  const char *p;
  const char spaces[9] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\0'};

  if (indent_level > 4)
    indent_level = 4;
  p = spaces + 8 - (indent_level << 1);

  fprintf (file, "%sIV struct:\n", p);
  if (iv->ssa_name && dump_name)
    {
      fprintf (file, "%s  SSA_NAME:\t", p);
      print_generic_expr (file, iv->ssa_name, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "%s  Type:\t", p);
  print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
  fprintf (file, "\n");

  fprintf (file, "%s  Base:\t", p);
  print_generic_expr (file, iv->base, TDF_SLIM);
  fprintf (file, "\n");

  fprintf (file, "%s  Step:\t", p);
  print_generic_expr (file, iv->step, TDF_SLIM);
  fprintf (file, "\n");

  if (iv->base_object)
    {
      fprintf (file, "%s  Object:\t", p);
      print_generic_expr (file, iv->base_object, TDF_SLIM);
      fprintf (file, "\n");
    }

  fprintf (file, "%s  Biv:\t%c\n", p, iv->biv_p ? 'Y' : 'N');

  fprintf (file, "%s  Overflowness wrto loop niter:\t%s\n",
	   p, iv->no_overflow ? "No-overflow" : "Overflow");
}

/* Dumps information about the USE to FILE.  */

void
dump_use (FILE *file, struct iv_use *use)
{
  fprintf (file, "  Use %d.%d:\n", use->group_id, use->id);
  fprintf (file, "    At stmt:\t");
  print_gimple_stmt (file, use->stmt, 0);
  fprintf (file, "    At pos:\t");
  if (use->op_p)
    print_generic_expr (file, *use->op_p, TDF_SLIM);
  fprintf (file, "\n");
  dump_iv (file, use->iv, false, 2);
}

/* Dumps information about the uses to FILE.  */

void
dump_groups (FILE *file, struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_group *group;

  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];
      fprintf (file, "Group %d:\n", group->id);
      if (group->type == USE_NONLINEAR_EXPR)
	fprintf (file, "  Type:\tGENERIC\n");
      else if (group->type == USE_ADDRESS)
	fprintf (file, "  Type:\tADDRESS\n");
      else
	{
	  gcc_assert (group->type == USE_COMPARE);
	  fprintf (file, "  Type:\tCOMPARE\n");
	}
      for (j = 0; j < group->vuses.length (); j++)
	dump_use (file, group->vuses[j]);
    }
}

/* Dumps information about induction variable candidate CAND to FILE.  */

void
dump_cand (FILE *file, struct iv_cand *cand)
{
  struct iv *iv = cand->iv;

  fprintf (file, "Candidate %d:\n", cand->id);
  if (cand->inv_vars)
    {
      fprintf (file, "  Depend on inv.vars: ");
      dump_bitmap (file, cand->inv_vars);
    }
  if (cand->inv_exprs)
    {
      fprintf (file, "  Depend on inv.exprs: ");
      dump_bitmap (file, cand->inv_exprs);
    }

  if (cand->var_before)
    {
      fprintf (file, "  Var before: ");
      print_generic_expr (file, cand->var_before, TDF_SLIM);
      fprintf (file, "\n");
    }
  if (cand->var_after)
    {
      fprintf (file, "  Var after: ");
      print_generic_expr (file, cand->var_after, TDF_SLIM);
      fprintf (file, "\n");
    }

  switch (cand->pos)
    {
    case IP_NORMAL:
      fprintf (file, "  Incr POS: before exit test\n");
      break;

    case IP_BEFORE_USE:
      fprintf (file, "  Incr POS: before use %d\n", cand->ainc_use->id);
      break;

    case IP_AFTER_USE:
      fprintf (file, "  Incr POS: after use %d\n", cand->ainc_use->id);
      break;

    case IP_END:
      fprintf (file, "  Incr POS: at end\n");
      break;

    case IP_ORIGINAL:
      fprintf (file, "  Incr POS: orig biv\n");
      break;
    }

  dump_iv (file, iv, false, 1);
}

/* Returns the info for ssa version VER.  */

static inline struct version_info *
ver_info (struct ivopts_data *data, unsigned ver)
{
  return data->version_info + ver;
}

/* Returns the info for ssa name NAME.  */

static inline struct version_info *
name_info (struct ivopts_data *data, tree name)
{
  return ver_info (data, SSA_NAME_VERSION (name));
}

/* Returns true if STMT is after the place where the IP_NORMAL ivs will be
   emitted in LOOP.  */

static bool
stmt_after_ip_normal_pos (struct loop *loop, gimple *stmt)
{
  basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);

  gcc_assert (bb);

  if (sbb == loop->latch)
    return true;

  if (sbb != bb)
    return false;

  return stmt == last_stmt (bb);
}

/* Returns true if STMT is after the place where the original induction
   variable CAND is incremented.  If TRUE_IF_EQUAL is set, we return true
   if the positions are identical.  */

static bool
stmt_after_inc_pos (struct iv_cand *cand, gimple *stmt, bool true_if_equal)
{
  basic_block cand_bb = gimple_bb (cand->incremented_at);
  basic_block stmt_bb = gimple_bb (stmt);

  if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
    return false;

  if (stmt_bb != cand_bb)
    return true;

  if (true_if_equal
      && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
    return true;
  return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
}

/* Returns true if STMT is after the place where the induction variable
   CAND is incremented in LOOP.  */

static bool
stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple *stmt)
{
  switch (cand->pos)
    {
    case IP_END:
      return false;

    case IP_NORMAL:
      return stmt_after_ip_normal_pos (loop, stmt);

    case IP_ORIGINAL:
    case IP_AFTER_USE:
      return stmt_after_inc_pos (cand, stmt, false);

    case IP_BEFORE_USE:
      return stmt_after_inc_pos (cand, stmt, true);

    default:
      gcc_unreachable ();
    }
}

/* Returns true if EXP is a ssa name that occurs in an abnormal phi node.  */

static bool
abnormal_ssa_name_p (tree exp)
{
  if (!exp)
    return false;

  if (TREE_CODE (exp) != SSA_NAME)
    return false;

  return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) != 0;
}

/* Returns false if BASE or INDEX contains a ssa name that occurs in an
   abnormal phi node.  Callback for for_each_index.  */

static bool
idx_contains_abnormal_ssa_name_p (tree base, tree *index,
				  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 2)))
	return false;
      if (abnormal_ssa_name_p (TREE_OPERAND (base, 3)))
	return false;
    }

  return !abnormal_ssa_name_p (*index);
}

/* Returns true if EXPR contains a ssa name that occurs in an
   abnormal phi node.  */

bool
contains_abnormal_ssa_name_p (tree expr)
{
  enum tree_code code;
  enum tree_code_class codeclass;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  codeclass = TREE_CODE_CLASS (code);

  if (code == SSA_NAME)
    return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr) != 0;

  if (code == INTEGER_CST
      || is_gimple_min_invariant (expr))
    return false;

  if (code == ADDR_EXPR)
    return !for_each_index (&TREE_OPERAND (expr, 0),
			    idx_contains_abnormal_ssa_name_p,
			    NULL);

  if (code == COND_EXPR)
    return contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0))
      || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1))
      || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 2));

  switch (codeclass)
    {
    case tcc_binary:
    case tcc_comparison:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1)))
	return true;

      /* Fallthru.  */
    case tcc_unary:
      if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0)))
	return true;

      break;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Returns the structure describing number of iterations determined from
   EXIT of DATA->current_loop, or NULL if something goes wrong.  */

static struct tree_niter_desc *
niter_for_exit (struct ivopts_data *data, edge exit)
{
  struct tree_niter_desc *desc;
  tree_niter_desc **slot;

  if (!data->niters)
    {
      data->niters = new hash_map<edge, tree_niter_desc *>;
      slot = NULL;
    }
  else
    slot = data->niters->get (exit);

  if (!slot)
    {
      /* Try to determine number of iterations.  We cannot safely work with ssa
	 names that appear in phi nodes on abnormal edges, so that we do not
	 create overlapping life ranges for them (PR 27283).  */
      desc = XNEW (struct tree_niter_desc);
      if (!number_of_iterations_exit (data->current_loop,
				      exit, desc, true)
	  || contains_abnormal_ssa_name_p (desc->niter))
	{
	  XDELETE (desc);
	  desc = NULL;
	}
      data->niters->put (exit, desc);
    }
  else
    desc = *slot;

  return desc;
}

/* Returns the structure describing number of iterations determined from
   single dominating exit of DATA->current_loop, or NULL if something
   goes wrong.  */

static struct tree_niter_desc *
niter_for_single_dom_exit (struct ivopts_data *data)
{
  edge exit = single_dom_exit (data->current_loop);

  if (!exit)
    return NULL;

  return niter_for_exit (data, exit);
}

/* Initializes data structures used by the iv optimization pass, stored
   in DATA.  */

static void
tree_ssa_iv_optimize_init (struct ivopts_data *data)
{
  data->version_info_size = 2 * num_ssa_names;
  data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
  data->relevant = BITMAP_ALLOC (NULL);
  data->important_candidates = BITMAP_ALLOC (NULL);
  data->max_inv_var_id = 0;
  data->max_inv_expr_id = 0;
  data->niters = NULL;
  data->vgroups.create (20);
  data->vcands.create (20);
  data->inv_expr_tab = new hash_table<iv_inv_expr_hasher> (10);
  data->name_expansion_cache = NULL;
  data->iv_common_cand_tab = new hash_table<iv_common_cand_hasher> (10);
  data->iv_common_cands.create (20);
  decl_rtl_to_reset.create (20);
  gcc_obstack_init (&data->iv_obstack);
}

/* Returns a memory object to which EXPR points.  In case we are able to
   determine that it does not point to any such object, NULL is returned.  */

static tree
determine_base_object (tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  tree base, obj;

  /* If this is a pointer casted to any type, we need to determine
     the base object for the pointer; so handle conversions before
     throwing away non-pointer expressions.  */
  if (CONVERT_EXPR_P (expr))
    return determine_base_object (TREE_OPERAND (expr, 0));

  if (!POINTER_TYPE_P (TREE_TYPE (expr)))
    return NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      return NULL_TREE;

    case ADDR_EXPR:
      obj = TREE_OPERAND (expr, 0);
      base = get_base_address (obj);

      if (!base)
	return expr;

      if (TREE_CODE (base) == MEM_REF)
	return determine_base_object (TREE_OPERAND (base, 0));

      return fold_convert (ptr_type_node,
			   build_fold_addr_expr (base));

    case POINTER_PLUS_EXPR:
      return determine_base_object (TREE_OPERAND (expr, 0));

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Pointer addition is done solely using POINTER_PLUS_EXPR.  */
      gcc_unreachable ();

    default:
      return fold_convert (ptr_type_node, expr);
    }
}

/* Return true if address expression with non-DECL_P operand appears
   in EXPR.  */

static bool
contain_complex_addr_expr (tree expr)
{
  bool res = false;

  STRIP_NOPS (expr);
  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      res |= contain_complex_addr_expr (TREE_OPERAND (expr, 0));
      res |= contain_complex_addr_expr (TREE_OPERAND (expr, 1));
      break;

    case ADDR_EXPR:
      return (!DECL_P (TREE_OPERAND (expr, 0)));

    default:
      return false;
    }

  return res;
}

/* Allocates an induction variable with given initial value BASE and step STEP
   for loop LOOP.  NO_OVERFLOW implies the iv doesn't overflow.  */

static struct iv *
alloc_iv (struct ivopts_data *data, tree base, tree step,
	  bool no_overflow = false)
{
  tree expr = base;
  struct iv *iv = (struct iv*) obstack_alloc (&data->iv_obstack,
					      sizeof (struct iv));
  gcc_assert (step != NULL_TREE);

  /* Lower address expression in base except ones with DECL_P as operand.
     By doing this:
       1) More accurate cost can be computed for address expressions;
       2) Duplicate candidates won't be created for bases in different
	  forms, like &a[0] and &a.  */
  STRIP_NOPS (expr);
  if ((TREE_CODE (expr) == ADDR_EXPR && !DECL_P (TREE_OPERAND (expr, 0)))
      || contain_complex_addr_expr (expr))
    {
      aff_tree comb;
      tree_to_aff_combination (expr, TREE_TYPE (expr), &comb);
      base = fold_convert (TREE_TYPE (base), aff_combination_to_tree (&comb));
    }

  iv->base = base;
  iv->base_object = determine_base_object (base);
  iv->step = step;
  iv->biv_p = false;
  iv->nonlin_use = NULL;
  iv->ssa_name = NULL_TREE;
  if (!no_overflow
       && !iv_can_overflow_p (data->current_loop, TREE_TYPE (base),
			      base, step))
    no_overflow = true;
  iv->no_overflow = no_overflow;
  iv->have_address_use = false;

  return iv;
}

/* Sets STEP and BASE for induction variable IV.  NO_OVERFLOW implies the IV
   doesn't overflow.  */

static void
set_iv (struct ivopts_data *data, tree iv, tree base, tree step,
	bool no_overflow)
{
  struct version_info *info = name_info (data, iv);

  gcc_assert (!info->iv);

  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
  info->iv = alloc_iv (data, base, step, no_overflow);
  info->iv->ssa_name = iv;
}

/* Finds induction variable declaration for VAR.  */

static struct iv *
get_iv (struct ivopts_data *data, tree var)
{
  basic_block bb;
  tree type = TREE_TYPE (var);

  if (!POINTER_TYPE_P (type)
      && !INTEGRAL_TYPE_P (type))
    return NULL;

  if (!name_info (data, var)->iv)
    {
      bb = gimple_bb (SSA_NAME_DEF_STMT (var));

      if (!bb
	  || !flow_bb_inside_loop_p (data->current_loop, bb))
	set_iv (data, var, var, build_int_cst (type, 0), true);
    }

  return name_info (data, var)->iv;
}

/* Return the first non-invariant ssa var found in EXPR.  */

static tree
extract_single_var_from_expr (tree expr)
{
  int i, n;
  tree tmp;
  enum tree_code code;

  if (!expr || is_gimple_min_invariant (expr))
    return NULL;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
	{
	  tmp = extract_single_var_from_expr (TREE_OPERAND (expr, i));

	  if (tmp)
	    return tmp;
	}
    }
  return (TREE_CODE (expr) == SSA_NAME) ? expr : NULL;
}

/* Finds basic ivs.  */

static bool
find_bivs (struct ivopts_data *data)
{
  gphi *phi;
  affine_iv iv;
  tree step, type, base, stop;
  bool found = false;
  struct loop *loop = data->current_loop;
  gphi_iterator psi;

  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();

      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
	continue;

      if (virtual_operand_p (PHI_RESULT (phi)))
	continue;

      if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
	continue;

      if (integer_zerop (iv.step))
	continue;

      step = iv.step;
      base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
      /* Stop expanding iv base at the first ssa var referred to by iv step.
	 Ideally we should stop at any ssa var, but since that is expensive
	 and unlikely to be needed, we just do it on the first one.

	 See PR64705 for the rationale.  */
      stop = extract_single_var_from_expr (step);
      base = expand_simple_operations (base, stop);
      if (contains_abnormal_ssa_name_p (base)
	  || contains_abnormal_ssa_name_p (step))
	continue;

      type = TREE_TYPE (PHI_RESULT (phi));
      base = fold_convert (type, base);
      if (step)
	{
	  if (POINTER_TYPE_P (type))
	    step = convert_to_ptrofftype (step);
	  else
	    step = fold_convert (type, step);
	}

      set_iv (data, PHI_RESULT (phi), base, step, iv.no_overflow);
      found = true;
    }

  return found;
}

1332 mark_bivs (struct ivopts_data
*data
)
1337 struct iv
*iv
, *incr_iv
;
1338 struct loop
*loop
= data
->current_loop
;
1339 basic_block incr_bb
;
1342 data
->bivs_not_used_in_addr
= 0;
1343 for (psi
= gsi_start_phis (loop
->header
); !gsi_end_p (psi
); gsi_next (&psi
))
1347 iv
= get_iv (data
, PHI_RESULT (phi
));
1351 var
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_latch_edge (loop
));
1352 def
= SSA_NAME_DEF_STMT (var
);
1353 /* Don't mark iv peeled from other one as biv. */
1355 && gimple_code (def
) == GIMPLE_PHI
1356 && gimple_bb (def
) == loop
->header
)
1359 incr_iv
= get_iv (data
, var
);
1363 /* If the increment is in the subloop, ignore it. */
1364 incr_bb
= gimple_bb (SSA_NAME_DEF_STMT (var
));
1365 if (incr_bb
->loop_father
!= data
->current_loop
1366 || (incr_bb
->flags
& BB_IRREDUCIBLE_LOOP
))
1370 incr_iv
->biv_p
= true;
1371 if (iv
->no_overflow
)
1372 data
->bivs_not_used_in_addr
++;
1373 if (incr_iv
->no_overflow
)
1374 data
->bivs_not_used_in_addr
++;
/* Checks whether STMT defines a linear induction variable and stores its
   parameters to IV.  */

static bool
find_givs_in_stmt_scev (struct ivopts_data *data, gimple *stmt, affine_iv *iv)
{
  tree lhs, stop;
  struct loop *loop = data->current_loop;

  iv->base = NULL_TREE;
  iv->step = NULL_TREE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
    return false;

  /* Stop expanding iv base at the first ssa var referred to by iv step.
     Ideally we should stop at any ssa var, but since that is expensive
     and unlikely to be needed, we just do it on the first one.

     See PR64705 for the rationale.  */
  stop = extract_single_var_from_expr (iv->step);
  iv->base = expand_simple_operations (iv->base, stop);
  if (contains_abnormal_ssa_name_p (iv->base)
      || contains_abnormal_ssa_name_p (iv->step))
    return false;

  /* If STMT could throw, then do not consider STMT as defining a GIV.
     While this will suppress optimizations, we cannot safely delete this
     GIV and associated statements, even if it appears it is not used.  */
  if (stmt_could_throw_p (stmt))
    return false;

  return true;
}

/* Finds general ivs in statement STMT.  */

static void
find_givs_in_stmt (struct ivopts_data *data, gimple *stmt)
{
  affine_iv iv;

  if (!find_givs_in_stmt_scev (data, stmt, &iv))
    return;

  set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step, iv.no_overflow);
}

/* Finds general ivs in basic block BB.  */

static void
find_givs_in_bb (struct ivopts_data *data, basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    find_givs_in_stmt (data, gsi_stmt (bsi));
}

/* Finds general ivs.  */

static void
find_givs (struct ivopts_data *data)
{
  struct loop *loop = data->current_loop;
  basic_block *body = get_loop_body_in_dom_order (loop);
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    find_givs_in_bb (data, body[i]);
  free (body);
}

/* For each ssa name defined in LOOP determines whether it is an induction
   variable and if so, its initial value and step.  */

static bool
find_induction_variables (struct ivopts_data *data)
{
  unsigned i;
  bitmap_iterator bi;

  if (!find_bivs (data))
    return false;

  find_givs (data);
  mark_bivs (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      struct tree_niter_desc *niter = niter_for_single_dom_exit (data);

      if (niter)
	{
	  fprintf (dump_file, "  number of iterations ");
	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
	  if (!integer_zerop (niter->may_be_zero))
	    {
	      fprintf (dump_file, "; zero if ");
	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
	    }
	  fprintf (dump_file, "\n");
	}

      fprintf (dump_file, "\n<Induction Vars>:\n");
      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
	{
	  struct version_info *info = ver_info (data, i);
	  if (info->iv && info->iv->step && !integer_zerop (info->iv->step))
	    dump_iv (dump_file, ver_info (data, i)->iv, true, 0);
	}
    }

  return true;
}

/* Records a use of TYPE at *USE_P in STMT whose value is IV in GROUP.
   For address type use, ADDR_BASE is the stripped IV base, ADDR_OFFSET
   is the const offset stripped from IV base; for other types of use,
   both are zero by default.  */

static struct iv_use *
record_use (struct iv_group *group, tree *use_p, struct iv *iv,
	    gimple *stmt, enum use_type type, tree addr_base,
	    unsigned HOST_WIDE_INT addr_offset)
{
  struct iv_use *use = XCNEW (struct iv_use);

  use->id = group->vuses.length ();
  use->group_id = group->id;
  use->type = type;
  use->iv = iv;
  use->stmt = stmt;
  use->op_p = use_p;
  use->addr_base = addr_base;
  use->addr_offset = addr_offset;

  group->vuses.safe_push (use);
  return use;
}

/* Checks whether OP is a loop-level invariant and if so, records it.
   NONLINEAR_USE is true if the invariant is used in a way we do not
   handle specially.  */

static void
record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
{
  basic_block bb;
  struct version_info *info;

  if (TREE_CODE (op) != SSA_NAME
      || virtual_operand_p (op))
    return;

  bb = gimple_bb (SSA_NAME_DEF_STMT (op));
  if (bb
      && flow_bb_inside_loop_p (data->current_loop, bb))
    return;

  info = name_info (data, op);
  info->name = op;
  info->has_nonlin_use |= nonlinear_use;
  if (!info->inv_id)
    info->inv_id = ++data->max_inv_var_id;
  bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
}

static tree
strip_offset (tree expr, unsigned HOST_WIDE_INT *offset);

/* Record a group of TYPE.  */

static struct iv_group *
record_group (struct ivopts_data *data, enum use_type type)
{
  struct iv_group *group = XCNEW (struct iv_group);

  group->id = data->vgroups.length ();
  group->type = type;
  group->related_cands = BITMAP_ALLOC (NULL);
  group->vuses.create (1);

  data->vgroups.safe_push (group);
  return group;
}

/* Record a use of TYPE at *USE_P in STMT whose value is IV in a group.
   A new group will be created if there is no existing group for the use.  */

static struct iv_use *
record_group_use (struct ivopts_data *data, tree *use_p,
		  struct iv *iv, gimple *stmt, enum use_type type)
{
  tree addr_base = NULL;
  struct iv_group *group = NULL;
  unsigned HOST_WIDE_INT addr_offset = 0;

  /* Record non address type use in a new group.  */
  if (type == USE_ADDRESS && iv->base_object)
    {
      unsigned int i;

      addr_base = strip_offset (iv->base, &addr_offset);
      for (i = 0; i < data->vgroups.length (); i++)
	{
	  struct iv_use *use;

	  group = data->vgroups[i];
	  use = group->vuses[0];
	  if (use->type != USE_ADDRESS || !use->iv->base_object)
	    continue;

	  /* Check if it has the same stripped base and step.  */
	  if (operand_equal_p (iv->base_object, use->iv->base_object, 0)
	      && operand_equal_p (iv->step, use->iv->step, 0)
	      && operand_equal_p (addr_base, use->addr_base, 0))
	    break;
	}
      if (i == data->vgroups.length ())
	group = NULL;
    }

  if (!group)
    group = record_group (data, type);

  return record_use (group, use_p, iv, stmt, type, addr_base, addr_offset);
}

/* Checks whether the use OP is interesting and if so, records it.  */

static struct iv_use *
find_interesting_uses_op (struct ivopts_data *data, tree op)
{
  struct iv *iv;
  gimple *stmt;
  struct iv_use *use;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  iv = get_iv (data, op);
  if (!iv)
    return NULL;

  if (iv->nonlin_use)
    {
      gcc_assert (iv->nonlin_use->type == USE_NONLINEAR_EXPR);
      return iv->nonlin_use;
    }

  if (integer_zerop (iv->step))
    {
      record_invariant (data, op, true);
      return NULL;
    }

  stmt = SSA_NAME_DEF_STMT (op);
  gcc_assert (gimple_code (stmt) == GIMPLE_PHI || is_gimple_assign (stmt));

  use = record_group_use (data, NULL, iv, stmt, USE_NONLINEAR_EXPR);
  iv->nonlin_use = use;
  return use;
}

/* Indicate how compare type iv_use can be handled.  */
enum comp_iv_rewrite
{
  COMP_IV_NA,
  /* We may rewrite compare type iv_use by expressing value of the iv_use.  */
  COMP_IV_EXPR,
  /* We may rewrite compare type iv_uses on both sides of comparison by
     expressing value of each iv_use.  */
  COMP_IV_EXPR_2,
  /* We may rewrite compare type iv_use by expressing value of the iv_use
     or by eliminating it with other iv_cand.  */
  COMP_IV_ELIM
};

/* Given a condition in statement STMT, checks whether it is a compare
   of an induction variable and an invariant.  If this is the case,
   CONTROL_VAR is set to location of the iv, BOUND to the location of
   the invariant, IV_VAR and IV_BOUND are set to the corresponding
   induction variable descriptions, and true is returned.  If this is not
   the case, CONTROL_VAR and BOUND are set to the arguments of the
   condition and false is returned.  */

static enum comp_iv_rewrite
extract_cond_operands (struct ivopts_data *data, gimple *stmt,
		       tree **control_var, tree **bound,
		       struct iv **iv_var, struct iv **iv_bound)
{
  /* The objects returned when COND has constant operands.  */
  static struct iv const_iv;
  static tree zero;
  tree *op0 = &zero, *op1 = &zero;
  struct iv *iv0 = &const_iv, *iv1 = &const_iv;
  enum comp_iv_rewrite rewrite_type = COMP_IV_NA;

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      gcond *cond_stmt = as_a <gcond *> (stmt);
      op0 = gimple_cond_lhs_ptr (cond_stmt);
      op1 = gimple_cond_rhs_ptr (cond_stmt);
    }
  else
    {
      op0 = gimple_assign_rhs1_ptr (stmt);
      op1 = gimple_assign_rhs2_ptr (stmt);
    }

  zero = integer_zero_node;
  const_iv.step = integer_zero_node;

  if (TREE_CODE (*op0) == SSA_NAME)
    iv0 = get_iv (data, *op0);
  if (TREE_CODE (*op1) == SSA_NAME)
    iv1 = get_iv (data, *op1);

  /* If both sides of the comparison are IVs, we can express ivs on
     both ends.  */
  if (iv0 && iv1 && !integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      rewrite_type = COMP_IV_EXPR_2;
      goto end;
    }

  /* If neither side of the comparison is an IV.  */
  if ((!iv0 || integer_zerop (iv0->step))
      && (!iv1 || integer_zerop (iv1->step)))
    goto end;

  /* Control variable may be on the other side.  */
  if (!iv0 || integer_zerop (iv0->step))
    {
      std::swap (op0, op1);
      std::swap (iv0, iv1);
    }
  /* If one side is an IV and the other side isn't loop invariant.  */
  if (!iv1)
    rewrite_type = COMP_IV_EXPR;
  /* If one side is an IV and the other side is loop invariant.  */
  else if (!integer_zerop (iv0->step) && integer_zerop (iv1->step))
    rewrite_type = COMP_IV_ELIM;

end:
  if (control_var)
    *control_var = op0;
  if (iv_var)
    *iv_var = iv0;
  if (bound)
    *bound = op1;
  if (iv_bound)
    *iv_bound = iv1;

  return rewrite_type;
}

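/* For instance, for "if (i < n)" with IV i and loop invariant n, the
   function above returns COMP_IV_ELIM; for "if (i < j)" with two IVs
   it returns COMP_IV_EXPR_2.  */
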
/* Checks whether the condition in STMT is interesting and if so,
   records it.  */

static void
find_interesting_uses_cond (struct ivopts_data *data, gimple *stmt)
{
  tree *var_p, *bound_p;
  struct iv *var_iv, *bound_iv;
  enum comp_iv_rewrite ret;

  ret = extract_cond_operands (data, stmt,
			       &var_p, &bound_p, &var_iv, &bound_iv);
  if (ret == COMP_IV_NA)
    {
      find_interesting_uses_op (data, *var_p);
      find_interesting_uses_op (data, *bound_p);
      return;
    }

  record_group_use (data, var_p, var_iv, stmt, USE_COMPARE);
  /* Record compare type iv_use for iv on the other side of comparison.  */
  if (ret == COMP_IV_EXPR_2)
    record_group_use (data, bound_p, bound_iv, stmt, USE_COMPARE);
}

/* Returns the outermost loop EXPR is obviously invariant in
   relative to the loop LOOP, i.e. if all its operands are defined
   outside of the returned loop.  Returns NULL if EXPR is not
   even obviously invariant in LOOP.  */

struct loop *
outermost_invariant_loop_for_expr (struct loop *loop, tree expr)
{
  basic_block def_bb;
  unsigned i, len;

  if (is_gimple_min_invariant (expr))
    return current_loops->tree_root;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb)
	{
	  if (flow_bb_inside_loop_p (loop, def_bb))
	    return NULL;
	  return superloop_at_depth (loop,
				     loop_depth (def_bb->loop_father) + 1);
	}

      return current_loops->tree_root;
    }

  if (!EXPR_P (expr))
    return NULL;

  unsigned maxdepth = 0;
  len = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < len; i++)
    {
      struct loop *ivloop;
      if (!TREE_OPERAND (expr, i))
	continue;

      ivloop = outermost_invariant_loop_for_expr (loop, TREE_OPERAND (expr, i));
      if (!ivloop)
	return NULL;
      maxdepth = MAX (maxdepth, loop_depth (ivloop));
    }

  return superloop_at_depth (loop, maxdepth);
}

/* Returns true if expression EXPR is obviously invariant in LOOP,
   i.e. if all its operands are defined outside of the LOOP.  LOOP
   should not be the function body.  */

bool
expr_invariant_in_loop_p (struct loop *loop, tree expr)
{
  basic_block def_bb;
  unsigned i, len;

  gcc_assert (loop_depth (loop) > 0);

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
	  && flow_bb_inside_loop_p (loop, def_bb))
	return false;

      return true;
    }

  if (!EXPR_P (expr))
    return false;

  len = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < len; i++)
    if (TREE_OPERAND (expr, i)
	&& !expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
      return false;

  return true;
}

/* Given expression EXPR which computes inductive values with respect
   to loop recorded in DATA, this function returns biv from which EXPR
   is derived by tracing definition chains of ssa variables in EXPR.  */

static struct iv *
find_deriving_biv_for_expr (struct ivopts_data *data, tree expr)
{
  struct iv *iv;
  unsigned i, n;
  tree e2, e1;
  enum tree_code code;
  gimple *stmt;

  if (expr == NULL_TREE)
    return NULL;

  if (is_gimple_min_invariant (expr))
    return NULL;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
	{
	  iv = find_deriving_biv_for_expr (data, TREE_OPERAND (expr, i));
	  if (iv)
	    return iv;
	}
    }

  /* Stop if it's not ssa name.  */
  if (code != SSA_NAME)
    return NULL;

  iv = get_iv (data, expr);
  if (!iv || integer_zerop (iv->step))
    return NULL;
  else if (iv->biv_p)
    return iv;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gphi *phi = dyn_cast <gphi *> (stmt))
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      basic_block phi_bb = gimple_bb (phi);

      /* Skip loop header PHI that doesn't define biv.  */
      if (phi_bb->loop_father == data->current_loop)
	return NULL;

      /* Skip virtual PHI nodes.  */
      if (virtual_operand_p (gimple_phi_result (phi)))
	return NULL;

      FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
	{
	  tree use = USE_FROM_PTR (use_p);
	  iv = find_deriving_biv_for_expr (data, use);
	  if (iv)
	    return iv;
	}
      return NULL;
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return NULL;

  e1 = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    return find_deriving_biv_for_expr (data, e1);

  switch (code)
    {
    case MULT_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      /* Increments, decrements and multiplications by a constant
	 are simple.  */
      e2 = gimple_assign_rhs2 (stmt);
      iv = find_deriving_biv_for_expr (data, e2);
      if (iv)
	return iv;
      gcc_fallthrough ();

    CASE_CONVERT:
      /* Casts are simple.  */
      return find_deriving_biv_for_expr (data, e1);

    default:
      break;
    }

  return NULL;
}

/* Record BIV and the bivs immediately preceding or following it as
   being used in address type uses.  */

static void
record_biv_for_address_use (struct ivopts_data *data, struct iv *biv)
{
  unsigned i;
  tree type, base_1, base_2;
  bitmap_iterator bi;

  if (!biv || !biv->biv_p || integer_zerop (biv->step)
      || biv->have_address_use || !biv->no_overflow)
    return;

  type = TREE_TYPE (biv->base);
  if (!INTEGRAL_TYPE_P (type))
    return;

  biv->have_address_use = true;
  data->bivs_not_used_in_addr--;
  base_1 = fold_build2 (PLUS_EXPR, type, biv->base, biv->step);
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      struct iv *iv = ver_info (data, i)->iv;

      if (!iv || !iv->biv_p || integer_zerop (iv->step)
	  || iv->have_address_use || !iv->no_overflow)
	continue;

      if (type != TREE_TYPE (iv->base)
	  || !INTEGRAL_TYPE_P (TREE_TYPE (iv->base)))
	continue;

      if (!operand_equal_p (biv->step, iv->step, 0))
	continue;

      base_2 = fold_build2 (PLUS_EXPR, type, iv->base, iv->step);
      if (operand_equal_p (base_1, iv->base, 0)
	  || operand_equal_p (base_2, biv->base, 0))
	{
	  iv->have_address_use = true;
	  data->bivs_not_used_in_addr--;
	}
    }
}

/* Cumulates the steps of indices into DATA and replaces their values with the
   initial ones.  Returns false when the value of the index cannot be
   determined.  Callback for for_each_index.  */

struct ifs_ivopts_data
{
  struct ivopts_data *ivopts_data;
  gimple *stmt;
  tree step;
};

static bool
idx_find_step (tree base, tree *idx, void *data)
{
  struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
  struct iv *iv;
  bool use_overflow_semantics = false;
  tree step, iv_base, iv_step, lbound, off;
  struct loop *loop = dta->ivopts_data->current_loop;

  /* If base is a component ref, require that the offset of the reference
     be invariant.  */
  if (TREE_CODE (base) == COMPONENT_REF)
    {
      off = component_ref_field_offset (base);
      return expr_invariant_in_loop_p (loop, off);
    }

  /* If base is array, first check whether we will be able to move the
     reference out of the loop (in order to take its address in strength
     reduction).  In order for this to work we need both lower bound
     and step to be loop invariants.  */
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      /* Moreover, for a range, the size needs to be invariant as well.  */
      if (TREE_CODE (base) == ARRAY_RANGE_REF
	  && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
	return false;

      step = array_ref_element_size (base);
      lbound = array_ref_low_bound (base);

      if (!expr_invariant_in_loop_p (loop, step)
	  || !expr_invariant_in_loop_p (loop, lbound))
	return false;
    }

  if (TREE_CODE (*idx) != SSA_NAME)
    return true;

  iv = get_iv (dta->ivopts_data, *idx);
  if (!iv)
    return false;

  /* XXX We produce for a base of *D42 with iv->base being &x[0]
     *&x[0], which is not folded and does not trigger the
     ARRAY_REF path below.  */
  *idx = iv->base;

  if (integer_zerop (iv->step))
    return true;

  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      step = array_ref_element_size (base);

      /* We only handle addresses whose step is an integer constant.  */
      if (TREE_CODE (step) != INTEGER_CST)
	return false;
    }
  else
    /* The step for pointer arithmetics already is 1 byte.  */
    step = size_one_node;

  iv_base = iv->base;
  iv_step = iv->step;
  if (iv->no_overflow && nowrap_type_p (TREE_TYPE (iv_step)))
    use_overflow_semantics = true;

  if (!convert_affine_scev (dta->ivopts_data->current_loop,
			    sizetype, &iv_base, &iv_step, dta->stmt,
			    use_overflow_semantics))
    /* The index might wrap.  */
    return false;

  step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
  dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);

  if (dta->ivopts_data->bivs_not_used_in_addr)
    {
      if (!iv->biv_p)
	iv = find_deriving_biv_for_expr (dta->ivopts_data, iv->ssa_name);

      record_biv_for_address_use (dta->ivopts_data, iv);
    }

  return true;
}

/* Records use in index IDX.  Callback for for_each_index.  Ivopts data
   object is passed to it in DATA.  */

static bool
idx_record_use (tree base, tree *idx,
		void *vdata)
{
  struct ivopts_data *data = (struct ivopts_data *) vdata;
  find_interesting_uses_op (data, *idx);
  if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
    {
      find_interesting_uses_op (data, array_ref_element_size (base));
      find_interesting_uses_op (data, array_ref_low_bound (base));
    }
  return true;
}

/* If we can prove that TOP = cst * BOT for some constant cst,
   store cst to MUL and return true.  Otherwise return false.
   The returned value is always sign-extended, regardless of the
   signedness of TOP and BOT.  */

static bool
constant_multiple_of (tree top, tree bot, widest_int *mul)
{
  tree mby;
  enum tree_code code;
  unsigned precision = TYPE_PRECISION (TREE_TYPE (top));
  widest_int res, p0, p1;

  STRIP_NOPS (top);
  STRIP_NOPS (bot);

  if (operand_equal_p (top, bot, 0))
    {
      *mul = 1;
      return true;
    }

  code = TREE_CODE (top);
  switch (code)
    {
    case MULT_EXPR:
      mby = TREE_OPERAND (top, 1);
      if (TREE_CODE (mby) != INTEGER_CST)
	return false;

      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
	return false;

      *mul = wi::sext (res * wi::to_widest (mby), precision);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
	  || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
	return false;

      if (code == MINUS_EXPR)
	p1 = -p1;
      *mul = wi::sext (p0 + p1, precision);
      return true;

    case INTEGER_CST:
      if (TREE_CODE (bot) != INTEGER_CST)
	return false;

      p0 = widest_int::from (top, SIGNED);
      p1 = widest_int::from (bot, SIGNED);
      if (p1 == 0)
	return false;
      *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
      return res == 0;

    default:
      return false;
    }
}

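/* E.g., for TOP = i * 6 and BOT = i the MULT_EXPR case above stores 6
   to MUL; for TOP = 12 and BOT = 4 the INTEGER_CST case stores 3.  */
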
/* Return true if memory reference REF with step STEP may be unaligned.  */

static bool
may_be_unaligned_p (tree ref, tree step)
{
  /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
     thus they are not misaligned.  */
  if (TREE_CODE (ref) == TARGET_MEM_REF)
    return false;

  unsigned int align = TYPE_ALIGN (TREE_TYPE (ref));
  if (GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref))) > align)
    align = GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref)));

  unsigned HOST_WIDE_INT bitpos;
  unsigned int ref_align;
  get_object_alignment_1 (ref, &ref_align, &bitpos);
  if (ref_align < align
      || (bitpos % align) != 0
      || (bitpos % BITS_PER_UNIT) != 0)
    return true;

  unsigned int trailing_zeros = tree_ctz (step);
  if (trailing_zeros < HOST_BITS_PER_INT
      && (1U << trailing_zeros) * BITS_PER_UNIT < align)
    return true;

  return false;
}

/* Return true if EXPR may be non-addressable.  */

bool
may_be_nonaddressable_p (tree expr)
{
  switch (TREE_CODE (expr))
    {
    case TARGET_MEM_REF:
      /* TARGET_MEM_REFs are translated directly to valid MEMs on the
	 target, thus they are always addressable.  */
      return false;

    case MEM_REF:
      /* Likewise for MEM_REFs, modulo the storage order.  */
      return REF_REVERSE_STORAGE_ORDER (expr);

    case BIT_FIELD_REF:
      if (REF_REVERSE_STORAGE_ORDER (expr))
	return true;
      return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    case COMPONENT_REF:
      if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
	return true;
      return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
	     || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
	return true;
      return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    case VIEW_CONVERT_EXPR:
      /* This kind of view-conversions may wrap non-addressable objects
	 and make them look addressable.  After some processing the
	 non-addressability may be uncovered again, causing ADDR_EXPRs
	 of inappropriate objects to be built.  */
      if (is_gimple_reg (TREE_OPERAND (expr, 0))
	  || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
	return true;
      return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));

    CASE_CONVERT:
      return true;

    default:
      break;
    }

  return false;
}

/* Finds addresses in *OP_P inside STMT.  */

static void
find_interesting_uses_address (struct ivopts_data *data, gimple *stmt,
			       tree *op_p)
{
  tree base = *op_p, step = size_zero_node;
  struct iv *civ;
  struct ifs_ivopts_data ifs_ivopts_data;

  /* Do not play with volatile memory references.  A bit too conservative,
     perhaps, but safe.  */
  if (gimple_has_volatile_ops (stmt))
    goto fail;

  /* Ignore bitfields for now.  Not really something terribly complicated
     to handle.  TODO.  */
  if (TREE_CODE (base) == BIT_FIELD_REF)
    goto fail;

  base = unshare_expr (base);

  if (TREE_CODE (base) == TARGET_MEM_REF)
    {
      tree type = build_pointer_type (TREE_TYPE (base));
      tree astep;

      if (TMR_BASE (base)
	  && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_BASE (base));
	  if (!civ)
	    goto fail;

	  TMR_BASE (base) = civ->base;
	  step = civ->step;
	}
      if (TMR_INDEX2 (base)
	  && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_INDEX2 (base));
	  if (!civ)
	    goto fail;

	  TMR_INDEX2 (base) = civ->base;
	  step = civ->step;
	}
      if (TMR_INDEX (base)
	  && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
	{
	  civ = get_iv (data, TMR_INDEX (base));
	  if (!civ)
	    goto fail;

	  TMR_INDEX (base) = civ->base;
	  astep = civ->step;

	  if (astep)
	    {
	      if (TMR_STEP (base))
		astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);

	      step = fold_build2 (PLUS_EXPR, type, step, astep);
	    }
	}

      if (integer_zerop (step))
	goto fail;
      base = tree_mem_ref_addr (type, base);
    }
  else
    {
      ifs_ivopts_data.ivopts_data = data;
      ifs_ivopts_data.stmt = stmt;
      ifs_ivopts_data.step = size_zero_node;
      if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
	  || integer_zerop (ifs_ivopts_data.step))
	goto fail;
      step = ifs_ivopts_data.step;

      /* Check that the base expression is addressable.  This needs
	 to be done after substituting bases of IVs into it.  */
      if (may_be_nonaddressable_p (base))
	goto fail;

      /* Moreover, on strict alignment platforms, check that it is
	 sufficiently aligned.  */
      if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
	goto fail;

      base = build_fold_addr_expr (base);

      /* Substituting bases of IVs into the base expression might
	 have caused folding opportunities.  */
      if (TREE_CODE (base) == ADDR_EXPR)
	{
	  tree *ref = &TREE_OPERAND (base, 0);
	  while (handled_component_p (*ref))
	    ref = &TREE_OPERAND (*ref, 0);
	  if (TREE_CODE (*ref) == MEM_REF)
	    {
	      tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
				      TREE_OPERAND (*ref, 0),
				      TREE_OPERAND (*ref, 1));
	      if (tem)
		*ref = tem;
	    }
	}
    }

  civ = alloc_iv (data, base, step);
  /* Fail if base object of this memory reference is unknown.  */
  if (civ->base_object == NULL_TREE)
    goto fail;

  record_group_use (data, op_p, civ, stmt, USE_ADDRESS);
  return;

fail:
  for_each_index (op_p, idx_record_use, data);
}

/* Finds and records invariants used in STMT.  */

static void
find_invariants_stmt (struct ivopts_data *data, gimple *stmt)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  tree op;

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);
      record_invariant (data, op, false);
    }
}

/* Finds interesting uses of induction variables in the statement STMT.  */

static void
find_interesting_uses_stmt (struct ivopts_data *data, gimple *stmt)
{
  struct iv *iv;
  tree op, *lhs, *rhs;
  ssa_op_iter iter;
  use_operand_p use_p;
  enum tree_code code;

  find_invariants_stmt (data, stmt);

  if (gimple_code (stmt) == GIMPLE_COND)
    {
      find_interesting_uses_cond (data, stmt);
      return;
    }

  if (is_gimple_assign (stmt))
    {
      lhs = gimple_assign_lhs_ptr (stmt);
      rhs = gimple_assign_rhs1_ptr (stmt);

      if (TREE_CODE (*lhs) == SSA_NAME)
	{
	  /* If the statement defines an induction variable, the uses are not
	     interesting by themselves.  */

	  iv = get_iv (data, *lhs);

	  if (iv && !integer_zerop (iv->step))
	    return;
	}

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	  && (REFERENCE_CLASS_P (*rhs)
	      || is_gimple_val (*rhs)))
	{
	  if (REFERENCE_CLASS_P (*rhs))
	    find_interesting_uses_address (data, stmt, rhs);
	  else
	    find_interesting_uses_op (data, *rhs);

	  if (REFERENCE_CLASS_P (*lhs))
	    find_interesting_uses_address (data, stmt, lhs);
	  return;
	}
      else if (TREE_CODE_CLASS (code) == tcc_comparison)
	{
	  find_interesting_uses_cond (data, stmt);
	  return;
	}

      /* TODO -- we should also handle address uses of type

	 memory = call (whatever);

	 and

	 call (memory).  */
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_bb (stmt) == data->current_loop->header)
    {
      iv = get_iv (data, PHI_RESULT (stmt));

      if (iv && !integer_zerop (iv->step))
	return;
    }

  FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
    {
      op = USE_FROM_PTR (use_p);

      if (TREE_CODE (op) != SSA_NAME)
	continue;

      iv = get_iv (data, op);
      if (!iv)
	continue;

      find_interesting_uses_op (data, op);
    }
}

/* Finds interesting uses of induction variables outside of loops
   on loop exit edge EXIT.  */

static void
find_interesting_uses_outside (struct ivopts_data *data, edge exit)
{
  gphi *phi;
  gphi_iterator psi;
  tree def;

  for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
      if (!virtual_operand_p (def))
	find_interesting_uses_op (data, def);
    }
}
/* Return TRUE if OFFSET is within the range of [base + offset] addressing
   mode for memory reference represented by USE.  */

static GTY (()) vec<rtx, va_gc> *addr_list;

static bool
addr_offset_valid_p (struct iv_use *use, HOST_WIDE_INT offset)
{
  rtx reg, addr;
  unsigned list_index;
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
  machine_mode addr_mode, mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));

  list_index = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
  if (list_index >= vec_safe_length (addr_list))
    vec_safe_grow_cleared (addr_list, list_index + MAX_MACHINE_MODE);

  addr = (*addr_list)[list_index];
  if (addr == NULL_RTX)
    {
      addr_mode = targetm.addr_space.address_mode (as);
      reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
      addr = gen_rtx_fmt_ee (PLUS, addr_mode, reg, NULL_RTX);
      (*addr_list)[list_index] = addr;
    }
  else
    addr_mode = GET_MODE (addr);

  XEXP (addr, 1) = gen_int_mode (offset, addr_mode);
  return (memory_address_addr_space_p (mem_mode, addr, as));
}
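/* The function above effectively asks the backend a question of the
   form (a sketch; the target and displacement are hypothetical): is

     (plus:DI (reg:DI 104) (const_int 32000))

   a legitimate address for a MEM_MODE access in address space AS?
   Only the constant operand differs between queries, so a single PLUS
   rtx per (address space, memory mode) pair is cached in ADDR_LIST and
   patched in place through XEXP (addr, 1).  */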
/* Comparison function to sort group in ascending order of addr_offset.  */

static int
group_compare_offset (const void *a, const void *b)
{
  const struct iv_use *const *u1 = (const struct iv_use *const *) a;
  const struct iv_use *const *u2 = (const struct iv_use *const *) b;

  if ((*u1)->addr_offset != (*u2)->addr_offset)
    return (*u1)->addr_offset < (*u2)->addr_offset ? -1 : 1;
  else
    return 0;
}
/* Check if small groups should be split.  Return true if no group
   contains more than two uses with distinct addr_offsets.  Return
   false otherwise.  We want to split such groups because:

     1) Small groups don't have much benefit and may interfere with
	general candidate selection.
     2) The problem size with only small groups is usually small, and
	the general algorithm handles it well.

   TODO -- The above claim may not hold when we want to merge memory
   accesses with consecutive addresses.  */

static bool
split_small_address_groups_p (struct ivopts_data *data)
{
  unsigned int i, j, distinct = 1;
  struct iv_use *pre;
  struct iv_group *group;

  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];
      if (group->vuses.length () == 1)
	continue;

      gcc_assert (group->type == USE_ADDRESS);
      if (group->vuses.length () == 2)
	{
	  if (group->vuses[0]->addr_offset > group->vuses[1]->addr_offset)
	    std::swap (group->vuses[0], group->vuses[1]);
	}
      else
	group->vuses.qsort (group_compare_offset);

      if (distinct > 2)
	continue;

      distinct = 1;
      for (pre = group->vuses[0], j = 1; j < group->vuses.length (); j++)
	{
	  if (group->vuses[j]->addr_offset != pre->addr_offset)
	    {
	      pre = group->vuses[j];
	      distinct++;
	    }

	  if (distinct > 2)
	    break;
	}
    }

  return (distinct <= 2);
}
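/* Example (offsets hypothetical): a group accessing base, base + 4 and
   base + 8 has three distinct addr_offsets, so the function returns
   false and such groups are kept whole; if every group touches at most
   two distinct offsets, splitting is preferred for the reasons listed
   above.  */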
/* For each group of address type uses, this function further groups
   these uses according to the maximum offset supported by target's
   [base + offset] addressing mode.  */

static void
split_address_groups (struct ivopts_data *data)
{
  unsigned int i, j;
  /* Always split group.  */
  bool split_p = split_small_address_groups_p (data);

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *new_group = NULL;
      struct iv_group *group = data->vgroups[i];
      struct iv_use *use = group->vuses[0];

      use->id = 0;
      use->group_id = group->id;
      if (group->vuses.length () == 1)
	continue;

      gcc_assert (group->type == USE_ADDRESS);

      for (j = 1; j < group->vuses.length ();)
	{
	  struct iv_use *next = group->vuses[j];
	  HOST_WIDE_INT offset = next->addr_offset - use->addr_offset;

	  /* Split group if asked to, or the offset against the first
	     use can't fit in offset part of addressing mode.  IV uses
	     having the same offset are still kept in one group.  */
	  if (offset != 0
	      && (split_p || !addr_offset_valid_p (use, offset)))
	    {
	      if (!new_group)
		new_group = record_group (data, group->type);
	      group->vuses.ordered_remove (j);
	      new_group->vuses.safe_push (next);
	      continue;
	    }

	  next->id = j;
	  next->group_id = group->id;
	  j++;
	}
    }
}
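/* Sketch of the effect (numbers hypothetical): given uses at offsets 0,
   8 and 65536 from a common base on a target whose [base + offset] form
   only encodes 16-bit signed displacements, the first two uses stay in
   one group while the third moves into a new group, because 65536 is
   not a valid offset against the first use.  */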
/* Finds uses of the induction variables that are interesting.  */

static void
find_interesting_uses (struct ivopts_data *data)
{
  basic_block bb;
  gimple_stmt_iterator bsi;
  basic_block *body = get_loop_body (data->current_loop);
  unsigned i;
  edge e;

  for (i = 0; i < data->current_loop->num_nodes; i++)
    {
      edge_iterator ei;
      bb = body[i];

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && !flow_bb_inside_loop_p (data->current_loop, e->dest))
	  find_interesting_uses_outside (data, e);

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	find_interesting_uses_stmt (data, gsi_stmt (bsi));
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	if (!is_gimple_debug (gsi_stmt (bsi)))
	  find_interesting_uses_stmt (data, gsi_stmt (bsi));
    }
  free (body);

  split_address_groups (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\n<IV Groups>:\n");
      dump_groups (dump_file, data);
      fprintf (dump_file, "\n");
    }
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  If INSIDE_ADDR
   is true, assume we are inside an address.  If TOP_COMPREF is true, assume
   we are at the top-level of the processed address.  */

static tree
strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
		HOST_WIDE_INT *offset)
{
  tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
  enum tree_code code;
  tree type, orig_type = TREE_TYPE (expr);
  HOST_WIDE_INT off0, off1, st;
  tree orig_expr = expr;

  STRIP_NOPS (expr);

  type = TREE_TYPE (expr);
  code = TREE_CODE (expr);
  *offset = 0;

  switch (code)
    {
    case INTEGER_CST:
      if (!cst_and_fits_in_hwi (expr)
	  || integer_zerop (expr))
	return orig_expr;

      *offset = int_cst_value (expr);
      return build_int_cst (orig_type, 0);

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      op0 = strip_offset_1 (op0, false, false, &off0);
      op1 = strip_offset_1 (op1, false, false, &off1);

      *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
      if (op0 == TREE_OPERAND (expr, 0)
	  && op1 == TREE_OPERAND (expr, 1))
	return orig_expr;

      if (integer_zerop (op1))
	expr = op0;
      else if (integer_zerop (op0))
	{
	  if (code == MINUS_EXPR)
	    expr = fold_build1 (NEGATE_EXPR, type, op1);
	  else
	    expr = op1;
	}
      else
	expr = fold_build2 (code, type, op0, op1);

      return fold_convert (orig_type, expr);

    case MULT_EXPR:
      op1 = TREE_OPERAND (expr, 1);
      if (!cst_and_fits_in_hwi (op1))
	return orig_expr;

      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, false, false, &off0);
      if (op0 == TREE_OPERAND (expr, 0))
	return orig_expr;

      *offset = off0 * int_cst_value (op1);
      if (integer_zerop (op0))
	expr = op0;
      else
	expr = fold_build2 (MULT_EXPR, type, op0, op1);

      return fold_convert (orig_type, expr);

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      if (!inside_addr)
	return orig_expr;

      step = array_ref_element_size (expr);
      if (!cst_and_fits_in_hwi (step))
	break;

      st = int_cst_value (step);
      op1 = TREE_OPERAND (expr, 1);
      op1 = strip_offset_1 (op1, false, false, &off1);
      *offset = off1 * st;

      if (top_compref
	  && integer_zerop (op1))
	{
	  /* Strip the component reference completely.  */
	  op0 = TREE_OPERAND (expr, 0);
	  op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	  *offset += off0;
	  return op0;
	}
      break;

    case COMPONENT_REF:
      {
	tree field;

	if (!inside_addr)
	  return orig_expr;

	tmp = component_ref_field_offset (expr);
	field = TREE_OPERAND (expr, 1);
	if (top_compref
	    && cst_and_fits_in_hwi (tmp)
	    && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field)))
	  {
	    HOST_WIDE_INT boffset, abs_off;

	    /* Strip the component reference completely.  */
	    op0 = TREE_OPERAND (expr, 0);
	    op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
	    boffset = int_cst_value (DECL_FIELD_BIT_OFFSET (field));
	    abs_off = abs_hwi (boffset) / BITS_PER_UNIT;
	    if (boffset < 0)
	      abs_off = -abs_off;

	    *offset = off0 + int_cst_value (tmp) + abs_off;
	    return op0;
	  }
      }
      break;

    case ADDR_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op0 = strip_offset_1 (op0, true, true, &off0);
      *offset += off0;

      if (op0 == TREE_OPERAND (expr, 0))
	return orig_expr;

      expr = build_fold_addr_expr (op0);
      return fold_convert (orig_type, expr);

    case MEM_REF:
      /* ??? Offset operand?  */
      inside_addr = false;
      break;

    default:
      return orig_expr;
    }

  /* Default handling of expressions for that we want to recurse into
     the first operand.  */
  op0 = TREE_OPERAND (expr, 0);
  op0 = strip_offset_1 (op0, inside_addr, false, &off0);
  *offset += off0;

  if (op0 == TREE_OPERAND (expr, 0)
      && (!op1 || op1 == TREE_OPERAND (expr, 1)))
    return orig_expr;

  expr = copy_node (expr);
  TREE_OPERAND (expr, 0) = op0;
  if (op1)
    TREE_OPERAND (expr, 1) = op1;

  /* Inside address, we might strip the top level component references,
     thus changing type of the expression.  Handling of ADDR_EXPR
     will fix that.  */
  expr = fold_convert (orig_type, expr);

  return expr;
}
/* Strips constant offsets from EXPR and stores them to OFFSET.  */

static tree
strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
{
  HOST_WIDE_INT off;
  tree core = strip_offset_1 (expr, false, false, &off);
  *offset = off;
  return core;
}
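/* For instance (hypothetical trees): stripping &a[i + 3], where a is an
   array of 4-byte integers, yields core &a[i] and offset 12; stripping
   the plain pointer arithmetic p + 20 yields core p and offset 20.  */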
/* Returns variant of TYPE that can be used as base for different uses.
   We return unsigned type with the same precision, which avoids problems
   with overflows.  */

static tree
generic_type_for (tree type)
{
  if (POINTER_TYPE_P (type))
    return unsigned_type_for (type);

  if (TYPE_UNSIGNED (type))
    return type;

  return unsigned_type_for (type);
}
/* Private data for walk_tree.  */

struct walk_tree_data
{
  bitmap *inv_vars;
  struct ivopts_data *idata;
};
/* Callback function for walk_tree, it records invariants and symbol
   reference in *EXPR_P.  DATA is the structure storing result info.  */

static tree
find_inv_vars_cb (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
{
  tree op = *expr_p;
  struct version_info *info;
  struct walk_tree_data *wdata = (struct walk_tree_data *) data;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL_TREE;

  info = name_info (wdata->idata, op);
  /* Because we expand simple operations when finding IVs, loop invariant
     variable that isn't referred by the original loop could be used now.
     Record such invariant variables here.  */
  if (!info->iv)
    {
      struct ivopts_data *idata = wdata->idata;
      basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (op));

      if (!bb || !flow_bb_inside_loop_p (idata->current_loop, bb))
	{
	  set_iv (idata, op, op, build_int_cst (TREE_TYPE (op), 0), true);
	  record_invariant (idata, op, false);
	}
    }
  if (!info->inv_id || info->has_nonlin_use)
    return NULL_TREE;

  if (!*wdata->inv_vars)
    *wdata->inv_vars = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*wdata->inv_vars, info->inv_id);

  return NULL_TREE;
}
/* Records invariants in *EXPR_P.  INV_VARS is the bitmap in which we
   should store them.  */

static inline void
find_inv_vars (struct ivopts_data *data, tree *expr_p, bitmap *inv_vars)
{
  struct walk_tree_data wdata;

  if (!inv_vars)
    return;

  wdata.idata = data;
  wdata.inv_vars = inv_vars;
  walk_tree (expr_p, find_inv_vars_cb, &wdata, NULL);
}
/* Get entry from invariant expr hash table for INV_EXPR.  A new entry
   will be recorded if it doesn't exist yet.  Given the two exprs below:
     inv_expr + cst1, inv_expr + cst2
   it's hard to decide whether the constant part should be stripped or
   not.  We choose not to strip it, based on the following facts:
     1) We would need to count the ADD cost for the constant part if it's
	stripped, which isn't always trivial where this function is called.
     2) Stripping the constant away may conflict with the following loop
	invariant hoisting pass.
     3) Not stripping the constant away results in more invariant exprs,
	which usually leads to decisions preferring lower reg pressure.  */

static iv_inv_expr_ent *
get_loop_invariant_expr (struct ivopts_data *data, tree inv_expr)
{
  STRIP_NOPS (inv_expr);

  if (TREE_CODE (inv_expr) == INTEGER_CST || TREE_CODE (inv_expr) == SSA_NAME)
    return NULL;

  /* Don't strip constant part away as we used to.  */

  /* Stores EXPR in DATA->inv_expr_tab, return pointer to iv_inv_expr_ent.  */
  struct iv_inv_expr_ent ent;
  ent.expr = inv_expr;
  ent.hash = iterative_hash_expr (inv_expr, 0);
  struct iv_inv_expr_ent **slot = data->inv_expr_tab->find_slot (&ent, INSERT);

  if (!*slot)
    {
      *slot = XNEW (struct iv_inv_expr_ent);
      (*slot)->expr = inv_expr;
      (*slot)->hash = ent.hash;
      (*slot)->id = ++data->max_inv_expr_id;
    }

  return *slot;
}
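/* Concretely (hypothetical exprs): inv_1 + 16 and inv_1 + 32 are
   recorded as two distinct invariant expressions rather than one shared
   inv_1 with the constants peeled off; per point 3) above, the larger
   number of invariant expressions tends to steer the cost model towards
   solutions with lower register pressure.  */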
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  If both BASE and STEP are NULL, we add a pseudocandidate for the
   replacement of the final value of the iv by a direct computation.  */

static struct iv_cand *
add_candidate_1 (struct ivopts_data *data,
		 tree base, tree step, bool important, enum iv_position pos,
		 struct iv_use *use, gimple *incremented_at,
		 struct iv *orig_iv = NULL)
{
  unsigned i;
  struct iv_cand *cand = NULL;
  tree type, orig_type;

  gcc_assert (base && step);

  /* -fkeep-gc-roots-live means that we have to keep a real pointer
     live, but the ivopts code may replace a real pointer with one
     pointing before or after the memory block that is then adjusted
     into the memory block during the loop.  FIXME: It would likely be
     better to actually force the pointer live and still use ivopts;
     for example, it would be enough to write the pointer into memory
     and keep it there until after the loop.  */
  if (flag_keep_gc_roots_live && POINTER_TYPE_P (TREE_TYPE (base)))
    return NULL;

  /* For non-original variables, make sure their values are computed in a type
     that does not invoke undefined behavior on overflows (since in general,
     we cannot prove that these induction variables are non-wrapping).  */
  if (pos != IP_ORIGINAL)
    {
      orig_type = TREE_TYPE (base);
      type = generic_type_for (orig_type);
      if (type != orig_type)
	{
	  base = fold_convert (type, base);
	  step = fold_convert (type, step);
	}
    }

  for (i = 0; i < data->vcands.length (); i++)
    {
      cand = data->vcands[i];

      if (cand->pos != pos)
	continue;

      if (cand->incremented_at != incremented_at
	  || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	      && cand->ainc_use != use))
	continue;

      if (operand_equal_p (base, cand->iv->base, 0)
	  && operand_equal_p (step, cand->iv->step, 0)
	  && (TYPE_PRECISION (TREE_TYPE (base))
	      == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
	break;
    }

  if (i == data->vcands.length ())
    {
      cand = XCNEW (struct iv_cand);
      cand->id = i;
      cand->iv = alloc_iv (data, base, step);
      cand->pos = pos;
      if (pos != IP_ORIGINAL)
	{
	  cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
	  cand->var_after = cand->var_before;
	}
      cand->important = important;
      cand->incremented_at = incremented_at;
      data->vcands.safe_push (cand);

      if (TREE_CODE (step) != INTEGER_CST)
	{
	  find_inv_vars (data, &step, &cand->inv_vars);

	  iv_inv_expr_ent *inv_expr = get_loop_invariant_expr (data, step);
	  /* Share bitmap between inv_vars and inv_exprs for cand.  */
	  if (inv_expr != NULL)
	    {
	      cand->inv_exprs = cand->inv_vars;
	      cand->inv_vars = NULL;
	      if (cand->inv_exprs)
		bitmap_clear (cand->inv_exprs);
	      else
		cand->inv_exprs = BITMAP_ALLOC (NULL);

	      bitmap_set_bit (cand->inv_exprs, inv_expr->id);
	    }
	}

      if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
	cand->ainc_use = use;
      else
	cand->ainc_use = NULL;

      cand->orig_iv = orig_iv;
      if (dump_file && (dump_flags & TDF_DETAILS))
	dump_cand (dump_file, cand);
    }

  cand->important |= important;

  /* Relate candidate to the group for which it is added.  */
  if (use)
    bitmap_set_bit (data->vgroups[use->group_id]->related_cands, i);

  return cand;
}
/* Returns true if incrementing the induction variable at the end of the LOOP
   is allowed.

   The purpose is to avoid splitting latch edge with a biv increment, thus
   creating a jump, possibly confusing other optimization passes and leaving
   less freedom to scheduler.  So we allow IP_END only if IP_NORMAL is not
   available (so we do not have a better alternative), or if the latch edge
   is already nonempty.  */

static bool
allow_ip_end_pos_p (struct loop *loop)
{
  if (!ip_normal_pos (loop))
    return true;

  if (!empty_block_p (ip_end_pos (loop)))
    return true;

  return false;
}
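/* E.g. (a sketch): in a do-while style loop whose latch block already
   contains statements, IP_END causes no edge splitting, so it is
   allowed even when IP_NORMAL is also available.  */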
/* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
   Important field is set to IMPORTANT.  */

static void
add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
			bool important, struct iv_use *use)
{
  basic_block use_bb = gimple_bb (use->stmt);
  machine_mode mem_mode;
  unsigned HOST_WIDE_INT cstepi;

  /* If we insert the increment in any position other than the standard
     ones, we must ensure that it is incremented once per iteration.
     It must not be in an inner nested loop, or one side of an if
     statement.  */
  if (use_bb->loop_father != data->current_loop
      || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
      || stmt_could_throw_p (use->stmt)
      || !cst_and_fits_in_hwi (step))
    return;

  cstepi = int_cst_value (step);

  mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
  if (((USE_LOAD_PRE_INCREMENT (mem_mode)
	|| USE_STORE_PRE_INCREMENT (mem_mode))
       && GET_MODE_SIZE (mem_mode) == cstepi)
      || ((USE_LOAD_PRE_DECREMENT (mem_mode)
	   || USE_STORE_PRE_DECREMENT (mem_mode))
	  && GET_MODE_SIZE (mem_mode) == -cstepi))
    {
      enum tree_code code = MINUS_EXPR;
      tree new_base;
      tree new_step = step;

      if (POINTER_TYPE_P (TREE_TYPE (base)))
	{
	  new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  code = POINTER_PLUS_EXPR;
	}
      else
	new_step = fold_convert (TREE_TYPE (base), new_step);
      new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
      add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
		       use->stmt);
    }
  if (((USE_LOAD_POST_INCREMENT (mem_mode)
	|| USE_STORE_POST_INCREMENT (mem_mode))
       && GET_MODE_SIZE (mem_mode) == cstepi)
      || ((USE_LOAD_POST_DECREMENT (mem_mode)
	   || USE_STORE_POST_DECREMENT (mem_mode))
	  && GET_MODE_SIZE (mem_mode) == -cstepi))
    add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
		     use->stmt);
}
/* Adds a candidate BASE + STEP * i.  Important field is set to IMPORTANT and
   position to POS.  If USE is not NULL, the candidate is set as related to
   it.  The candidate computation is scheduled before exit condition and at
   the end of loop.  */

static void
add_candidate (struct ivopts_data *data,
	       tree base, tree step, bool important, struct iv_use *use,
	       struct iv *orig_iv = NULL)
{
  if (ip_normal_pos (data->current_loop))
    add_candidate_1 (data, base, step, important,
		     IP_NORMAL, use, NULL, orig_iv);
  if (ip_end_pos (data->current_loop)
      && allow_ip_end_pos_p (data->current_loop))
    add_candidate_1 (data, base, step, important, IP_END, use, NULL, orig_iv);
}
/* Adds standard iv candidates.  */

static void
add_standard_iv_candidates (struct ivopts_data *data)
{
  add_candidate (data, integer_zero_node, integer_one_node, true, NULL);

  /* The same for a double-integer type if it is still fast enough.  */
  if (TYPE_PRECISION
	(long_integer_type_node) > TYPE_PRECISION (integer_type_node)
      && TYPE_PRECISION (long_integer_type_node) <= BITS_PER_WORD)
    add_candidate (data, build_int_cst (long_integer_type_node, 0),
		   build_int_cst (long_integer_type_node, 1), true, NULL);

  /* The same for a double-integer type if it is still fast enough.  */
  if (TYPE_PRECISION
	(long_long_integer_type_node) > TYPE_PRECISION (long_integer_type_node)
      && TYPE_PRECISION (long_long_integer_type_node) <= BITS_PER_WORD)
    add_candidate (data, build_int_cst (long_long_integer_type_node, 0),
		   build_int_cst (long_long_integer_type_node, 1), true, NULL);
}
/* Adds candidates based on the old induction variable IV.  */

static void
add_iv_candidate_for_biv (struct ivopts_data *data, struct iv *iv)
{
  gimple *phi;
  tree def;
  struct iv_cand *cand;

  /* Check if this biv is used in address type use.  */
  if (iv->no_overflow && iv->have_address_use
      && INTEGRAL_TYPE_P (TREE_TYPE (iv->base))
      && TYPE_PRECISION (TREE_TYPE (iv->base)) < TYPE_PRECISION (sizetype))
    {
      tree base = fold_convert (sizetype, iv->base);
      tree step = fold_convert (sizetype, iv->step);

      /* Add iv cand of same precision as index part in TARGET_MEM_REF.  */
      add_candidate (data, base, step, true, NULL, iv);
      /* Add iv cand of the original type only if it has nonlinear use.  */
      if (iv->nonlin_use)
	add_candidate (data, iv->base, iv->step, true, NULL);
    }
  else
    add_candidate (data, iv->base, iv->step, true, NULL);

  /* The same, but with initial value zero.  */
  if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
    add_candidate (data, size_int (0), iv->step, true, NULL);
  else
    add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
		   iv->step, true, NULL);

  phi = SSA_NAME_DEF_STMT (iv->ssa_name);
  if (gimple_code (phi) == GIMPLE_PHI)
    {
      /* Additionally record the possibility of leaving the original iv
	 untouched.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
      /* Don't add candidate if it's from another PHI node because
	 it's an affine iv appearing in the form of PEELED_CHREC.  */
      phi = SSA_NAME_DEF_STMT (def);
      if (gimple_code (phi) != GIMPLE_PHI)
	{
	  cand = add_candidate_1 (data,
				  iv->base, iv->step, true, IP_ORIGINAL, NULL,
				  SSA_NAME_DEF_STMT (def));
	  if (cand)
	    {
	      cand->var_before = iv->ssa_name;
	      cand->var_after = def;
	    }
	}
      else
	gcc_assert (gimple_bb (phi) == data->current_loop->header);
    }
}
/* Adds candidates based on the old induction variables.  */

static void
add_iv_candidate_for_bivs (struct ivopts_data *data)
{
  unsigned i;
  struct iv *iv;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      iv = ver_info (data, i)->iv;
      if (iv && iv->biv_p && !integer_zerop (iv->step))
	add_iv_candidate_for_biv (data, iv);
    }
}
/* Record common candidate {BASE, STEP} derived from USE in hashtable.  */

static void
record_common_cand (struct ivopts_data *data, tree base,
		    tree step, struct iv_use *use)
{
  struct iv_common_cand ent;
  struct iv_common_cand **slot;

  ent.base = base;
  ent.step = step;
  ent.hash = iterative_hash_expr (base, 0);
  ent.hash = iterative_hash_expr (step, ent.hash);

  slot = data->iv_common_cand_tab->find_slot (&ent, INSERT);
  if (*slot == NULL)
    {
      *slot = new iv_common_cand ();
      (*slot)->base = base;
      (*slot)->step = step;
      (*slot)->uses.create (8);
      (*slot)->hash = ent.hash;
      data->iv_common_cands.safe_push ((*slot));
    }

  gcc_assert (use != NULL);
  (*slot)->uses.safe_push (use);
}
/* Comparison function used to sort common candidates.  */

static int
common_cand_cmp (const void *p1, const void *p2)
{
  unsigned n1, n2;
  const struct iv_common_cand *const *const ccand1
    = (const struct iv_common_cand *const *) p1;
  const struct iv_common_cand *const *const ccand2
    = (const struct iv_common_cand *const *) p2;

  n1 = (*ccand1)->uses.length ();
  n2 = (*ccand2)->uses.length ();
  return n2 - n1;
}
/* Adds IV candidates based on the common candidates recorded.  */

static void
add_iv_candidate_derived_from_uses (struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_cand *cand_1, *cand_2;

  data->iv_common_cands.qsort (common_cand_cmp);
  for (i = 0; i < data->iv_common_cands.length (); i++)
    {
      struct iv_common_cand *ptr = data->iv_common_cands[i];

      /* Only add IV candidate if it's derived from multiple uses.  */
      if (ptr->uses.length () <= 1)
	break;

      cand_1 = NULL;
      cand_2 = NULL;
      if (ip_normal_pos (data->current_loop))
	cand_1 = add_candidate_1 (data, ptr->base, ptr->step,
				  false, IP_NORMAL, NULL, NULL);

      if (ip_end_pos (data->current_loop)
	  && allow_ip_end_pos_p (data->current_loop))
	cand_2 = add_candidate_1 (data, ptr->base, ptr->step,
				  false, IP_END, NULL, NULL);

      /* Bind deriving uses and the new candidates.  */
      for (j = 0; j < ptr->uses.length (); j++)
	{
	  struct iv_group *group = data->vgroups[ptr->uses[j]->group_id];
	  if (cand_1)
	    bitmap_set_bit (group->related_cands, cand_1->id);
	  if (cand_2)
	    bitmap_set_bit (group->related_cands, cand_2->id);
	}
    }

  /* Release data since it is useless from this point.  */
  data->iv_common_cand_tab->empty ();
  data->iv_common_cands.truncate (0);
}
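/* For illustration (hypothetical uses): if uses a[i] and b[i] both
   record the common candidate {0, 4} (initial value zero, step 4), the
   entry accumulates two uses, survives the length check above, and the
   candidate is added once and related to both groups instead of being
   duplicated per use.  */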
/* Adds candidates based on the value of USE's iv.  */

static void
add_iv_candidate_for_use (struct ivopts_data *data, struct iv_use *use)
{
  unsigned HOST_WIDE_INT offset;
  tree base;
  tree basetype;
  struct iv *iv = use->iv;

  add_candidate (data, iv->base, iv->step, false, use);

  /* Record common candidate for use in case it can be shared by others.  */
  record_common_cand (data, iv->base, iv->step, use);

  /* Record common candidate with initial value zero.  */
  basetype = TREE_TYPE (iv->base);
  if (POINTER_TYPE_P (basetype))
    basetype = sizetype;
  record_common_cand (data, build_int_cst (basetype, 0), iv->step, use);

  /* Record common candidate with constant offset stripped in base.
     Like the use itself, we also add candidate directly for it.  */
  base = strip_offset (iv->base, &offset);
  if (offset || base != iv->base)
    {
      record_common_cand (data, base, iv->step, use);
      add_candidate (data, base, iv->step, false, use);
    }

  /* Record common candidate with base_object removed in base.  */
  base = iv->base;
  STRIP_NOPS (base);
  if (iv->base_object != NULL && TREE_CODE (base) == POINTER_PLUS_EXPR)
    {
      tree step = iv->step;

      base = TREE_OPERAND (base, 1);
      step = fold_convert (sizetype, step);
      record_common_cand (data, base, step, use);
      /* Also record common candidate with offset stripped.  */
      base = strip_offset (base, &offset);
      if (offset)
	record_common_cand (data, base, step, use);
    }

  /* At last, add auto-incremental candidates.  Make such variables
     important since other iv uses with same base object may be based
     on their value.  */
  if (use != NULL && use->type == USE_ADDRESS)
    add_autoinc_candidates (data, iv->base, iv->step, true, use);
}
/* Adds candidates based on the uses.  */

static void
add_iv_candidate_for_groups (struct ivopts_data *data)
{
  unsigned i;

  /* Only add candidate for the first use in group.  */
  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];

      gcc_assert (group->vuses[0] != NULL);
      add_iv_candidate_for_use (data, group->vuses[0]);
    }
  add_iv_candidate_derived_from_uses (data);
}
/* Record important candidates and add them to related_cands bitmaps.  */

static void
record_important_candidates (struct ivopts_data *data)
{
  unsigned i;
  struct iv_group *group;

  for (i = 0; i < data->vcands.length (); i++)
    {
      struct iv_cand *cand = data->vcands[i];

      if (cand->important)
	bitmap_set_bit (data->important_candidates, i);
    }

  data->consider_all_candidates = (data->vcands.length ()
				   <= CONSIDER_ALL_CANDIDATES_BOUND);

  /* Add important candidates to groups' related_cands bitmaps.  */
  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];
      bitmap_ior_into (group->related_cands, data->important_candidates);
    }
}
/* Allocates the data structure mapping the (use, candidate) pairs to costs.
   If consider_all_candidates is true, we use a two-dimensional array, otherwise
   we allocate a simple list to every use.  */

static void
alloc_use_cost_map (struct ivopts_data *data)
{
  unsigned i, size, s;

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];

      if (data->consider_all_candidates)
	size = data->vcands.length ();
      else
	{
	  s = bitmap_count_bits (group->related_cands);

	  /* Round up to the nearest power of two, so that computing the
	     modulo is fast.  */
	  size = s ? (1 << ceil_log2 (s)) : 1;
	}

      group->n_map_members = size;
      group->cost_map = XCNEWVEC (struct cost_pair, size);
    }
}
/* Sets cost of (GROUP, CAND) pair to COST and record that it depends
   on invariants INV_VARS and that the value used in expressing it is
   VALUE, and in case of iv elimination the comparison operator is COMP.  */

static void
set_group_iv_cost (struct ivopts_data *data,
		   struct iv_group *group, struct iv_cand *cand,
		   comp_cost cost, bitmap inv_vars, tree value,
		   enum tree_code comp, bitmap inv_exprs)
{
  unsigned i, s;

  if (cost.infinite_cost_p ())
    {
      BITMAP_FREE (inv_vars);
      BITMAP_FREE (inv_exprs);
      return;
    }

  if (data->consider_all_candidates)
    {
      group->cost_map[cand->id].cand = cand;
      group->cost_map[cand->id].cost = cost;
      group->cost_map[cand->id].inv_vars = inv_vars;
      group->cost_map[cand->id].inv_exprs = inv_exprs;
      group->cost_map[cand->id].value = value;
      group->cost_map[cand->id].comp = comp;
      return;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (group->n_map_members - 1);
  for (i = s; i < group->n_map_members; i++)
    if (!group->cost_map[i].cand)
      goto found;
  for (i = 0; i < s; i++)
    if (!group->cost_map[i].cand)
      goto found;

  gcc_unreachable ();

found:
  group->cost_map[i].cand = cand;
  group->cost_map[i].cost = cost;
  group->cost_map[i].inv_vars = inv_vars;
  group->cost_map[i].inv_exprs = inv_exprs;
  group->cost_map[i].value = value;
  group->cost_map[i].comp = comp;
}
/* Gets cost of (GROUP, CAND) pair.  */

static struct cost_pair *
get_group_iv_cost (struct ivopts_data *data, struct iv_group *group,
		   struct iv_cand *cand)
{
  unsigned i, s;
  struct cost_pair *ret;

  if (!cand)
    return NULL;

  if (data->consider_all_candidates)
    {
      ret = group->cost_map + cand->id;
      if (!ret->cand)
	return NULL;

      return ret;
    }

  /* n_map_members is a power of two, so this computes modulo.  */
  s = cand->id & (group->n_map_members - 1);
  for (i = s; i < group->n_map_members; i++)
    if (group->cost_map[i].cand == cand)
      return group->cost_map + i;
    else if (group->cost_map[i].cand == NULL)
      return NULL;
  for (i = 0; i < s; i++)
    if (group->cost_map[i].cand == cand)
      return group->cost_map + i;
    else if (group->cost_map[i].cand == NULL)
      return NULL;

  return NULL;
}
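/* A minimal sketch of the probing used by the two functions above
   (numbers hypothetical): with n_map_members == 8 and cand->id == 13,
   the start slot is 13 & 7 == 5; the search visits slots 5, 6, 7, then
   wraps around to 0 .. 4, stopping early at an empty slot -- the same
   order in which set_group_iv_cost inserts.  */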
/* Produce DECL_RTL for object obj so it looks like it is stored in memory.  */

static rtx
produce_memory_decl_rtl (tree obj, int *regno)
{
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
  machine_mode address_mode = targetm.addr_space.address_mode (as);
  rtx x;

  gcc_assert (obj);
  if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
    {
      const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
      x = gen_rtx_SYMBOL_REF (address_mode, name);
      SET_SYMBOL_REF_DECL (x, obj);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
      targetm.encode_section_info (obj, x, true);
    }
  else
    {
      x = gen_raw_REG (address_mode, (*regno)++);
      x = gen_rtx_MEM (DECL_MODE (obj), x);
      set_mem_addr_space (x, as);
    }

  return x;
}
/* Prepares decl_rtl for variables referred in *EXPR_P.  Callback for
   walk_tree.  DATA contains the actual fake register number.  */

static tree
prepare_decl_rtl (tree *expr_p, int *ws, void *data)
{
  tree obj = NULL_TREE;
  rtx x = NULL_RTX;
  int *regno = (int *) data;

  switch (TREE_CODE (*expr_p))
    {
    case ADDR_EXPR:
      for (expr_p = &TREE_OPERAND (*expr_p, 0);
	   handled_component_p (*expr_p);
	   expr_p = &TREE_OPERAND (*expr_p, 0))
	continue;
      obj = *expr_p;
      if (DECL_P (obj) && HAS_RTL_P (obj) && !DECL_RTL_SET_P (obj))
	x = produce_memory_decl_rtl (obj, regno);
      break;

    case SSA_NAME:
      *ws = 0;
      obj = SSA_NAME_VAR (*expr_p);
      /* Defer handling of anonymous SSA_NAMEs to the expander.  */
      if (!obj)
	return NULL_TREE;
      if (!DECL_RTL_SET_P (obj))
	x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
      break;

    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *ws = 0;
      obj = *expr_p;

      if (DECL_RTL_SET_P (obj))
	break;

      if (DECL_MODE (obj) == BLKmode)
	x = produce_memory_decl_rtl (obj, regno);
      else
	x = gen_raw_REG (DECL_MODE (obj), (*regno)++);

      break;

    default:
      break;
    }

  if (x)
    {
      decl_rtl_to_reset.safe_push (obj);
      SET_DECL_RTL (obj, x);
    }

  return NULL_TREE;
}
/* Determines cost of the computation of EXPR.  */

static unsigned
computation_cost (tree expr, bool speed)
{
  rtx_insn *seq;
  rtx rslt;
  tree type = TREE_TYPE (expr);
  unsigned cost;
  /* Avoid using hard regs in ways which may be unsupported.  */
  int regno = LAST_VIRTUAL_REGISTER + 1;
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  enum node_frequency real_frequency = node->frequency;

  node->frequency = NODE_FREQUENCY_NORMAL;
  crtl->maybe_hot_insn_p = speed;
  walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
  start_sequence ();
  rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
  seq = get_insns ();
  end_sequence ();
  default_rtl_profile ();
  node->frequency = real_frequency;

  cost = seq_cost (seq, speed);
  if (MEM_P (rslt))
    cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
			  TYPE_ADDR_SPACE (type), speed);
  else if (!REG_P (rslt))
    cost += set_src_cost (rslt, TYPE_MODE (type), speed);

  return cost;
}
/* Returns variable containing the value of candidate CAND at statement AT.  */

static tree
var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple *stmt)
{
  if (stmt_after_increment (loop, cand, stmt))
    return cand->var_after;
  else
    return cand->var_before;
}
/* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
   same precision that is at least as wide as the precision of TYPE, stores
   BA to A and BB to B, and returns the type of BA.  Otherwise, returns the
   type of A and B.  */

static tree
determine_common_wider_type (tree *a, tree *b)
{
  tree wider_type = NULL;
  tree suba, subb;
  tree atype = TREE_TYPE (*a);

  if (CONVERT_EXPR_P (*a))
    {
      suba = TREE_OPERAND (*a, 0);
      wider_type = TREE_TYPE (suba);
      if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
	return atype;
    }
  else
    return atype;

  if (CONVERT_EXPR_P (*b))
    {
      subb = TREE_OPERAND (*b, 0);
      if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
	return atype;
    }
  else
    return atype;

  *a = suba;
  *b = subb;
  return wider_type;
}
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in two parts in a
   decomposed form.  The invariant part is stored in AFF_INV; while the variant
   part in AFF_VAR.  Store the ratio of CAND.step over USE.step in PRAT if it's
   non-null.  Returns false if USE cannot be expressed using CAND.  */

static bool
get_computation_aff_1 (struct loop *loop, gimple *at, struct iv_use *use,
		       struct iv_cand *cand, struct aff_tree *aff_inv,
		       struct aff_tree *aff_var, widest_int *prat = NULL)
{
  tree ubase = use->iv->base, ustep = use->iv->step;
  tree cbase = cand->iv->base, cstep = cand->iv->step;
  tree common_type, uutype, var, cstep_common;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
  aff_tree aff_cbase;
  widest_int rat;

  /* We must have a precision to express the values of use.  */
  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    return false;

  var = var_at_stmt (loop, cand, at);
  uutype = unsigned_type_for (utype);

  /* If the conversion is not noop, perform it.  */
  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      if (cand->orig_iv != NULL && CONVERT_EXPR_P (cbase)
	  && (CONVERT_EXPR_P (cstep) || TREE_CODE (cstep) == INTEGER_CST))
	{
	  tree inner_base, inner_step, inner_type;
	  inner_base = TREE_OPERAND (cbase, 0);
	  if (CONVERT_EXPR_P (cstep))
	    inner_step = TREE_OPERAND (cstep, 0);
	  else
	    inner_step = cstep;

	  inner_type = TREE_TYPE (inner_base);
	  /* If candidate is added from a biv whose type is smaller than
	     ctype, we know both candidate and the biv won't overflow.
	     In this case, it's safe to skip the conversion in candidate.
	     As an example, (unsigned short)((unsigned long)A) equals to
	     (unsigned short)A, if A has a type no larger than short.  */
	  if (TYPE_PRECISION (inner_type) <= TYPE_PRECISION (uutype))
	    {
	      cbase = inner_base;
	      cstep = inner_step;
	    }
	}
      cbase = fold_convert (uutype, cbase);
      cstep = fold_convert (uutype, cstep);
      var = fold_convert (uutype, var);
    }

  /* Ratio is 1 when computing the value of biv cand by itself.
     We can't rely on constant_multiple_of in this case because the
     use is created after the original biv is selected.  The call
     could fail because of inconsistent fold behavior.  See PR68021
     for more information.  */
  if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
    {
      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (use->iv->ssa_name == cand->var_after);
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
      rat = 1;
    }
  else if (!constant_multiple_of (ustep, cstep, &rat))
    return false;

  if (prat)
    *prat = rat;

  /* In case both UBASE and CBASE are shortened to UUTYPE from some common
     type, we achieve better folding by computing their difference in this
     wider type, and cast the result to UUTYPE.  We do not need to worry about
     overflows, as all the arithmetics will in the end be performed in UUTYPE
     anyway.  */
  common_type = determine_common_wider_type (&ubase, &cbase);

  /* use = ubase - ratio * cbase + ratio * var.  */
  tree_to_aff_combination (ubase, common_type, aff_inv);
  tree_to_aff_combination (cbase, common_type, &aff_cbase);
  tree_to_aff_combination (var, uutype, aff_var);

  /* We need to shift the value if we are after the increment.  */
  if (stmt_after_increment (loop, cand, at))
    {
      aff_tree cstep_aff;

      if (common_type != uutype)
	cstep_common = fold_convert (common_type, cstep);
      else
	cstep_common = cstep;

      tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
      aff_combination_add (&aff_cbase, &cstep_aff);
    }

  aff_combination_scale (&aff_cbase, -rat);
  aff_combination_add (aff_inv, &aff_cbase);
  if (common_type != uutype)
    aff_combination_convert (aff_inv, uutype);

  aff_combination_scale (aff_var, rat);
  return true;
}
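/* A worked instance of the identity use = ubase - ratio * cbase
   + ratio * cand (values hypothetical): for a use {base 4, step 8} and
   a candidate {base 0, step 4}, ratio == 8 / 4 == 2, so the use's value
   is 4 - 2 * 0 + 2 * cand at any statement before the candidate's
   increment.  */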
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The expression is stored in a decomposed
   form into AFF.  Returns false if USE cannot be expressed using CAND.  */

static bool
get_computation_aff (struct loop *loop, gimple *at, struct iv_use *use,
		     struct iv_cand *cand, struct aff_tree *aff)
{
  aff_tree aff_var;

  if (!get_computation_aff_1 (loop, at, use, cand, aff, &aff_var))
    return false;

  aff_combination_add (aff, &aff_var);
  return true;
}
/* Return the type of USE.  */

static tree
get_use_type (struct iv_use *use)
{
  tree base_type = TREE_TYPE (use->iv->base);
  tree type;

  if (use->type == USE_ADDRESS)
    {
      /* The base_type may be a void pointer.  Create a pointer type based on
	 the mem_ref instead.  */
      type = build_pointer_type (TREE_TYPE (*use->op_p));
      gcc_assert (TYPE_ADDR_SPACE (TREE_TYPE (type))
		  == TYPE_ADDR_SPACE (TREE_TYPE (base_type)));
    }
  else
    type = base_type;

  return type;
}
/* Determines the expression by which USE is expressed from induction variable
   CAND at statement AT in LOOP.  The computation is unshared.  */

static tree
get_computation_at (struct loop *loop, gimple *at,
		    struct iv_use *use, struct iv_cand *cand)
{
  aff_tree aff;
  tree type = get_use_type (use);

  if (!get_computation_aff (loop, at, use, cand, &aff))
    return NULL_TREE;
  unshare_aff_combination (&aff);
  return fold_convert (type, aff_combination_to_tree (&aff));
}
/* Adjust the cost COST for being in loop setup rather than loop body.
   If we're optimizing for space, the loop setup overhead is constant;
   if we're optimizing for speed, amortize it over the per-iteration cost.
   If ROUND_UP_P is true, the result is rounded up rather than truncated
   toward zero when optimizing for speed.  */

static unsigned
adjust_setup_cost (struct ivopts_data *data, unsigned cost,
		   bool round_up_p = false)
{
  if (cost == INFTY)
    return cost;
  else if (optimize_loop_for_speed_p (data->current_loop))
    {
      HOST_WIDE_INT niters = avg_loop_niter (data->current_loop);
      return ((HOST_WIDE_INT) cost + (round_up_p ? niters - 1 : 0)) / niters;
    }
  else
    return cost;
}
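/* Worked example (numbers hypothetical): a setup cost of 10 in a loop
   averaging 4 iterations is charged as 10 / 4 == 2 per iteration when
   optimizing for speed, or (10 + 3) / 4 == 3 with ROUND_UP_P; when
   optimizing for size the full 10 is kept.  */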
/* Calculate the SPEED or size cost of shiftadd EXPR in MODE.  MULT is the
   EXPR operand holding the shift.  COST0 and COST1 are the costs for
   calculating the operands of EXPR.  Returns true if successful, and returns
   the cost in COST.  */

static bool
get_shiftadd_cost (tree expr, scalar_int_mode mode, comp_cost cost0,
		   comp_cost cost1, tree mult, bool speed, comp_cost *cost)
{
  comp_cost res;
  tree op1 = TREE_OPERAND (expr, 1);
  tree cst = TREE_OPERAND (mult, 1);
  tree multop = TREE_OPERAND (mult, 0);
  int m = exact_log2 (int_cst_value (cst));
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int as_cost, sa_cost;
  bool mult_in_op1;

  if (!(m >= 0 && m < maxm))
    return false;

  mult_in_op1 = operand_equal_p (op1, mult, 0);

  as_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);

  /* If the target has a cheap shift-and-add or shift-and-sub instruction,
     use that in preference to a shift insn followed by an add insn.  */
  sa_cost = (TREE_CODE (expr) != MINUS_EXPR
	     ? shiftadd_cost (speed, mode, m)
	     : (mult_in_op1
		? shiftsub1_cost (speed, mode, m)
		: shiftsub0_cost (speed, mode, m)));

  res = comp_cost (MIN (as_cost, sa_cost), 0);
  res += (mult_in_op1 ? cost0 : cost1);

  STRIP_NOPS (multop);
  if (!is_gimple_val (multop))
    res += force_expr_to_var_cost (multop, speed);

  *cost = res;
  return true;
}
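/* For instance (costs hypothetical): for EXPR = a + b * 8, MULT is
   b * 8 and m == 3, so the comparison above is between a shift followed
   by an add (as_cost) and a single shift-and-add instruction, such as a
   scaled-index lea on x86 (sa_cost); the cheaper alternative wins.  */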
/* Estimates cost of forcing expression EXPR into a variable.  */

static comp_cost
force_expr_to_var_cost (tree expr, bool speed)
{
  static bool costs_initialized = false;
  static unsigned integer_cost [2];
  static unsigned symbol_cost [2];
  static unsigned address_cost [2];
  tree op0, op1;
  comp_cost cost0, cost1, cost;
  machine_mode mode;
  scalar_int_mode int_mode;

  if (!costs_initialized)
    {
      tree type = build_pointer_type (integer_type_node);
      tree var, addr;
      rtx x;
      int i;

      var = create_tmp_var_raw (integer_type_node, "test_var");
      TREE_STATIC (var) = 1;
      x = produce_memory_decl_rtl (var, NULL);
      SET_DECL_RTL (var, x);

      addr = build1 (ADDR_EXPR, type, var);

      for (i = 0; i < 2; i++)
	{
	  integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
							     2000), i);

	  symbol_cost[i] = computation_cost (addr, i) + 1;

	  address_cost[i]
	    = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
	      fprintf (dump_file, "  integer %d\n", (int) integer_cost[i]);
	      fprintf (dump_file, "  symbol %d\n", (int) symbol_cost[i]);
	      fprintf (dump_file, "  address %d\n", (int) address_cost[i]);
	      fprintf (dump_file, "  other %d\n", (int) target_spill_cost[i]);
	      fprintf (dump_file, "\n");
	    }
	}

      costs_initialized = true;
    }

  STRIP_NOPS (expr);

  if (SSA_VAR_P (expr))
    return no_cost;

  if (is_gimple_min_invariant (expr))
    {
      if (TREE_CODE (expr) == INTEGER_CST)
	return comp_cost (integer_cost [speed], 0);

      if (TREE_CODE (expr) == ADDR_EXPR)
	{
	  tree obj = TREE_OPERAND (expr, 0);

	  if (VAR_P (obj)
	      || TREE_CODE (obj) == PARM_DECL
	      || TREE_CODE (obj) == RESULT_DECL)
	    return comp_cost (symbol_cost [speed], 0);
	}

      return comp_cost (address_cost [speed], 0);
    }

  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);
      STRIP_NOPS (op0);
      STRIP_NOPS (op1);
      break;

    CASE_CONVERT:
    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      STRIP_NOPS (op0);
      op1 = NULL_TREE;
      break;

    default:
      /* Just an arbitrary value, FIXME.  */
      return comp_cost (target_spill_cost[speed], 0);
    }

  if (op0 == NULL_TREE
      || TREE_CODE (op0) == SSA_NAME || CONSTANT_CLASS_P (op0))
    cost0 = no_cost;
  else
    cost0 = force_expr_to_var_cost (op0, speed);

  if (op1 == NULL_TREE
      || TREE_CODE (op1) == SSA_NAME || CONSTANT_CLASS_P (op1))
    cost1 = no_cost;
  else
    cost1 = force_expr_to_var_cost (op1, speed);

  mode = TYPE_MODE (TREE_TYPE (expr));
  switch (TREE_CODE (expr))
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      cost = comp_cost (add_cost (speed, mode), 0);
      if (TREE_CODE (expr) != NEGATE_EXPR)
	{
	  tree mult = NULL_TREE;
	  comp_cost sa_cost;
	  if (TREE_CODE (op1) == MULT_EXPR)
	    mult = op1;
	  else if (TREE_CODE (op0) == MULT_EXPR)
	    mult = op0;

	  if (mult != NULL_TREE
	      && is_a <scalar_int_mode> (mode, &int_mode)
	      && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
	      && get_shiftadd_cost (expr, int_mode, cost0, cost1, mult,
				    speed, &sa_cost))
	    return sa_cost;
	}
      break;

    CASE_CONVERT:
      {
	tree inner_mode, outer_mode;
	outer_mode = TREE_TYPE (expr);
	inner_mode = TREE_TYPE (op0);
	cost = comp_cost (convert_cost (TYPE_MODE (outer_mode),
					TYPE_MODE (inner_mode), speed), 0);
      }
      break;

    case MULT_EXPR:
      if (cst_and_fits_in_hwi (op0))
	cost = comp_cost (mult_by_coeff_cost (int_cst_value (op0),
					      mode, speed), 0);
      else if (cst_and_fits_in_hwi (op1))
	cost = comp_cost (mult_by_coeff_cost (int_cst_value (op1),
					      mode, speed), 0);
      else
	return comp_cost (target_spill_cost [speed], 0);
      break;

    case TRUNC_DIV_EXPR:
      /* Division by power of two is usually cheap, so we allow it.  Forbid
	 anything else.  */
      if (integer_pow2p (TREE_OPERAND (expr, 1)))
	cost = comp_cost (add_cost (speed, mode), 0);
      else
	cost = comp_cost (target_spill_cost[speed], 0);
      break;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_NOT_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      cost = comp_cost (add_cost (speed, mode), 0);
      break;

    default:
      gcc_unreachable ();
    }

  cost += cost0;
  cost += cost1;
  return cost;
}
/* Estimates cost of forcing EXPR into a variable.  INV_VARS is a set of the
   invariants the computation depends on.  */

static comp_cost
force_var_cost (struct ivopts_data *data, tree expr, bitmap *inv_vars)
{
  if (!expr)
    return no_cost;

  find_inv_vars (data, &expr, inv_vars);
  return force_expr_to_var_cost (expr, data->speed);
}
/* Returns cost of auto-modifying address expression in shape base + offset.
   AINC_STEP is step size of the address IV.  AINC_OFFSET is offset of the
   address expression.  The address expression has ADDR_MODE in addr space
   AS.  The memory access has MEM_MODE.  SPEED means we are optimizing for
   speed or size.  */

enum ainc_type
{
  AINC_PRE_INC,		/* Pre increment.  */
  AINC_PRE_DEC,		/* Pre decrement.  */
  AINC_POST_INC,	/* Post increment.  */
  AINC_POST_DEC,	/* Post decrement.  */
  AINC_NONE		/* Also the number of auto increment types.  */
};

struct ainc_cost_data
{
  unsigned costs[AINC_NONE];
};

static comp_cost
get_address_cost_ainc (HOST_WIDE_INT ainc_step, HOST_WIDE_INT ainc_offset,
		       machine_mode addr_mode, machine_mode mem_mode,
		       addr_space_t as, bool speed)
{
  if (!USE_LOAD_PRE_DECREMENT (mem_mode)
      && !USE_STORE_PRE_DECREMENT (mem_mode)
      && !USE_LOAD_POST_DECREMENT (mem_mode)
      && !USE_STORE_POST_DECREMENT (mem_mode)
      && !USE_LOAD_PRE_INCREMENT (mem_mode)
      && !USE_STORE_PRE_INCREMENT (mem_mode)
      && !USE_LOAD_POST_INCREMENT (mem_mode)
      && !USE_STORE_POST_INCREMENT (mem_mode))
    return infinite_cost;

  static vec<ainc_cost_data *> ainc_cost_data_list;
  unsigned idx = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
  if (idx >= ainc_cost_data_list.length ())
    {
      unsigned nsize = ((unsigned) as + 1) * MAX_MACHINE_MODE;

      gcc_assert (nsize > idx);
      ainc_cost_data_list.safe_grow_cleared (nsize);
    }

  ainc_cost_data *data = ainc_cost_data_list[idx];
  if (data == NULL)
    {
      rtx reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);

      data = (ainc_cost_data *) xcalloc (1, sizeof (*data));
      data->costs[AINC_PRE_DEC] = INFTY;
      data->costs[AINC_POST_DEC] = INFTY;
      data->costs[AINC_PRE_INC] = INFTY;
      data->costs[AINC_POST_INC] = INFTY;
      if (USE_LOAD_PRE_DECREMENT (mem_mode)
	  || USE_STORE_PRE_DECREMENT (mem_mode))
	{
	  rtx addr = gen_rtx_PRE_DEC (addr_mode, reg);

	  if (memory_address_addr_space_p (mem_mode, addr, as))
	    data->costs[AINC_PRE_DEC]
	      = address_cost (addr, mem_mode, as, speed);
	}
      if (USE_LOAD_POST_DECREMENT (mem_mode)
	  || USE_STORE_POST_DECREMENT (mem_mode))
	{
	  rtx addr = gen_rtx_POST_DEC (addr_mode, reg);

	  if (memory_address_addr_space_p (mem_mode, addr, as))
	    data->costs[AINC_POST_DEC]
	      = address_cost (addr, mem_mode, as, speed);
	}
      if (USE_LOAD_PRE_INCREMENT (mem_mode)
	  || USE_STORE_PRE_INCREMENT (mem_mode))
	{
	  rtx addr = gen_rtx_PRE_INC (addr_mode, reg);

	  if (memory_address_addr_space_p (mem_mode, addr, as))
	    data->costs[AINC_PRE_INC]
	      = address_cost (addr, mem_mode, as, speed);
	}
      if (USE_LOAD_POST_INCREMENT (mem_mode)
	  || USE_STORE_POST_INCREMENT (mem_mode))
	{
	  rtx addr = gen_rtx_POST_INC (addr_mode, reg);

	  if (memory_address_addr_space_p (mem_mode, addr, as))
	    data->costs[AINC_POST_INC]
	      = address_cost (addr, mem_mode, as, speed);
	}
      ainc_cost_data_list[idx] = data;
    }

  HOST_WIDE_INT msize = GET_MODE_SIZE (mem_mode);
  if (ainc_offset == 0 && msize == ainc_step)
    return comp_cost (data->costs[AINC_POST_INC], 0);
  if (ainc_offset == 0 && msize == -ainc_step)
    return comp_cost (data->costs[AINC_POST_DEC], 0);
  if (ainc_offset == msize && msize == ainc_step)
    return comp_cost (data->costs[AINC_PRE_INC], 0);
  if (ainc_offset == -msize && msize == -ainc_step)
    return comp_cost (data->costs[AINC_PRE_DEC], 0);

  return infinite_cost;
}
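/* How the matching at the end works, concretely (hypothetical sizes):
   for a 4-byte access with ainc_step == 4, offset 0 selects the
   post-increment cost (the *p++ pattern) and offset 4 the pre-increment
   one; with ainc_step == -4 the decrement variants mirror this.  */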
/* Return cost of computing USE's address expression by using CAND.
   AFF_INV and AFF_VAR represent invariant and variant parts of the
   address expression, respectively.  If AFF_INV is simple, store
   the loop invariant variables on which it depends in INV_VARS;
   if AFF_INV is complicated, handle it as a new invariant expression
   and record it in INV_EXPR.  RATIO indicates the multiple between
   steps of USE and CAND.  If CAN_AUTOINC is non-NULL, store a boolean
   value to it indicating if this is an auto-increment address.  */

static comp_cost
get_address_cost (struct ivopts_data *data, struct iv_use *use,
		  struct iv_cand *cand, aff_tree *aff_inv,
		  aff_tree *aff_var, HOST_WIDE_INT ratio,
		  bitmap *inv_vars, iv_inv_expr_ent **inv_expr,
		  bool *can_autoinc, bool speed)
{
  rtx addr;
  bool simple_inv = true;
  tree comp_inv = NULL_TREE, type = aff_var->type;
  comp_cost var_cost = no_cost, cost = no_cost;
  struct mem_address parts = {NULL_TREE, integer_one_node,
			      NULL_TREE, NULL_TREE, NULL_TREE};
  machine_mode addr_mode = TYPE_MODE (type);
  machine_mode mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));

  if (!aff_combination_const_p (aff_inv))
    {
      parts.index = integer_one_node;
      /* Addressing mode "base + index".  */
      if (valid_mem_ref_p (mem_mode, as, &parts))
	{
	  parts.step = wide_int_to_tree (type, ratio);
	  /* Addressing mode "base + index << scale".  */
	  if (ratio != 1 && !valid_mem_ref_p (mem_mode, as, &parts))
	    parts.step = NULL_TREE;

	  if (aff_inv->offset != 0)
	    {
	      parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
	      /* Addressing mode "base + index [<< scale] + offset".  */
	      if (!valid_mem_ref_p (mem_mode, as, &parts))
		parts.offset = NULL_TREE;
	      else
		aff_inv->offset = 0;
	    }

	  move_fixed_address_to_symbol (&parts, aff_inv);
	  /* Base is fixed address and is moved to symbol part.  */
	  if (parts.symbol != NULL_TREE && aff_combination_zero_p (aff_inv))
	    parts.base = NULL_TREE;

	  /* Addressing mode "symbol + base + index [<< scale] [+ offset]".  */
	  if (parts.symbol != NULL_TREE
	      && !valid_mem_ref_p (mem_mode, as, &parts))
	    {
	      aff_combination_add_elt (aff_inv, parts.symbol, 1);
	      parts.symbol = NULL_TREE;
	      /* Reset SIMPLE_INV since symbol address needs to be computed
		 outside of address expression in this case.  */
	      simple_inv = false;
	      /* Symbol part is moved back to base part, it can't be NULL.  */
	      parts.base = integer_one_node;
	    }
	}
      else
	parts.index = NULL_TREE;
    }
  else
    {
      if (can_autoinc && ratio == 1 && cst_and_fits_in_hwi (cand->iv->step))
	{
	  HOST_WIDE_INT ainc_step = int_cst_value (cand->iv->step);
	  HOST_WIDE_INT ainc_offset = (aff_inv->offset).to_shwi ();

	  if (stmt_after_increment (data->current_loop, cand, use->stmt))
	    ainc_offset += ainc_step;
	  cost = get_address_cost_ainc (ainc_step, ainc_offset,
					addr_mode, mem_mode, as, speed);
	  if (!cost.infinite_cost_p ())
	    {
	      *can_autoinc = true;
	      return cost;
	    }
	  cost = no_cost;
	}

      if (!aff_combination_zero_p (aff_inv))
	{
	  parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
	  /* Addressing mode "base + offset".  */
	  if (!valid_mem_ref_p (mem_mode, as, &parts))
	    parts.offset = NULL_TREE;
	  else
	    aff_inv->offset = 0;
	}
    }

  if (simple_inv)
    simple_inv = (aff_inv == NULL
		  || aff_combination_const_p (aff_inv)
		  || aff_combination_singleton_var_p (aff_inv));
  if (!aff_combination_zero_p (aff_inv))
    comp_inv = aff_combination_to_tree (aff_inv);
  if (comp_inv != NULL_TREE)
    cost = force_var_cost (data, comp_inv, inv_vars);
  if (ratio != 1 && parts.step == NULL_TREE)
    var_cost += mult_by_coeff_cost (ratio, addr_mode, speed);
  if (comp_inv != NULL_TREE && parts.index == NULL_TREE)
    var_cost += add_cost (speed, addr_mode);

  if (comp_inv && inv_expr && !simple_inv)
    {
      *inv_expr = get_loop_invariant_expr (data, comp_inv);
      /* Clear depends on.  */
      if (*inv_expr != NULL && inv_vars && *inv_vars)
	bitmap_clear (*inv_vars);

      /* Cost of small invariant expression adjusted against loop niters
	 is usually zero, which makes it difficult to be differentiated
	 from candidate based on loop invariant variables.  Secondly, the
	 generated invariant expression may not be hoisted out of loop by
	 the following pass.  We penalize the cost by rounding up in order
	 to neutralize such effects.  */
      cost.cost = adjust_setup_cost (data, cost.cost, true);
      cost.scratch = cost.cost;
    }

  cost += var_cost;
  addr = addr_for_mem_ref (&parts, as, false);
  gcc_assert (memory_address_addr_space_p (mem_mode, addr, as));
  cost += address_cost (addr, mem_mode, as, speed);

  if (parts.symbol != NULL_TREE)
    cost.complexity += 1;
  if (parts.step != NULL_TREE && !integer_onep (parts.step))
    cost.complexity += 1;
  if (parts.base != NULL_TREE && parts.index != NULL_TREE)
    cost.complexity += 1;
  if (parts.offset != NULL_TREE && !integer_zerop (parts.offset))
    cost.complexity += 1;

  return cost;
}
/* Scale (multiply) the computed COST (except the scratch part, which should
   be hoisted out of the loop) by header->frequency / AT->frequency, which
   makes the expected cost more accurate.  */

static comp_cost
get_scaled_computation_cost_at (ivopts_data *data, gimple *at, comp_cost cost)
{
  int loop_freq = data->current_loop->header->frequency;
  int bb_freq = gimple_bb (at)->frequency;
  if (loop_freq != 0)
    {
      gcc_assert (cost.scratch <= cost.cost);
      int scaled_cost
	= cost.scratch + (cost.cost - cost.scratch) * bb_freq / loop_freq;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Scaling cost based on bb prob "
		 "by %2.2f: %d (scratch: %d) -> %d (%d/%d)\n",
		 1.0f * bb_freq / loop_freq, cost.cost,
		 cost.scratch, scaled_cost, bb_freq, loop_freq);

      cost.cost = scaled_cost;
    }

  return cost;
}
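/* Numeric sketch (frequencies hypothetical): with cost 12, scratch 4,
   bb_freq 500 and loop_freq 1000, the variable part 12 - 4 == 8 scales
   to 8 * 500 / 1000 == 4, giving 4 + 4 == 8; the scratch part stays
   unscaled since it is expected to be hoisted out of the loop.  */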
/* Determines the cost of the computation by which USE is expressed
   from induction variable CAND.  If ADDRESS_P is true, we just need
   to create an address from it, otherwise we want to get it into a
   register.  A set of invariants we depend on is stored in INV_VARS.
   If CAN_AUTOINC is nonnull, use it to record whether autoinc
   addressing is likely.  If INV_EXPR is nonnull, record the invariant
   expr entry in it.  */

static comp_cost
get_computation_cost (struct ivopts_data *data, struct iv_use *use,
		      struct iv_cand *cand, bool address_p, bitmap *inv_vars,
		      bool *can_autoinc, iv_inv_expr_ent **inv_expr)
{
  gimple *at = use->stmt;
  tree ubase = use->iv->base, cbase = cand->iv->base;
  tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
  tree comp_inv = NULL_TREE;
  HOST_WIDE_INT ratio, aratio;
  comp_cost cost;
  widest_int rat;
  aff_tree aff_inv, aff_var;
  bool speed = optimize_bb_for_speed_p (gimple_bb (at));

  if (inv_vars)
    *inv_vars = NULL;
  if (can_autoinc)
    *can_autoinc = false;
  if (inv_expr)
    *inv_expr = NULL;

  /* Check if we have enough precision to express the values of use.  */
  if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
    return infinite_cost;

  if (address_p
      || (use->iv->base_object
	  && cand->iv->base_object
	  && POINTER_TYPE_P (TREE_TYPE (use->iv->base_object))
	  && POINTER_TYPE_P (TREE_TYPE (cand->iv->base_object))))
    {
      /* Do not try to express the address of an object with a computation
	 based on the address of a different object.  This may cause
	 problems in rtl level alias analysis (that does not expect this to
	 be happening, as this is illegal in C), and would be unlikely to
	 be useful anyway.  */
      if (use->iv->base_object
	  && cand->iv->base_object
	  && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
	return infinite_cost;
    }

  if (!get_computation_aff_1 (data->current_loop, at, use,
			      cand, &aff_inv, &aff_var, &rat)
      || !wi::fits_shwi_p (rat))
    return infinite_cost;

  ratio = rat.to_shwi ();
  if (address_p)
    {
      cost = get_address_cost (data, use, cand, &aff_inv, &aff_var, ratio,
			       inv_vars, inv_expr, can_autoinc, speed);
      return get_scaled_computation_cost_at (data, at, cost);
    }

  bool simple_inv = (aff_combination_const_p (&aff_inv)
		     || aff_combination_singleton_var_p (&aff_inv));
  tree signed_type = signed_type_for (aff_combination_type (&aff_inv));
  aff_combination_convert (&aff_inv, signed_type);
  if (!aff_combination_zero_p (&aff_inv))
    comp_inv = aff_combination_to_tree (&aff_inv);

  cost = force_var_cost (data, comp_inv, inv_vars);
  if (comp_inv && inv_expr && !simple_inv)
    {
      *inv_expr = get_loop_invariant_expr (data, comp_inv);
      /* Clear depends on.  */
      if (*inv_expr != NULL && inv_vars && *inv_vars)
	bitmap_clear (*inv_vars);

      cost.cost = adjust_setup_cost (data, cost.cost);
      /* Record setup cost in scratch field.  */
      cost.scratch = cost.cost;
    }
  /* Cost of a constant integer can be covered when adding the invariant
     part to the variant part.  */
  else if (comp_inv && CONSTANT_CLASS_P (comp_inv))
    cost = no_cost;

  /* Need type narrowing to represent use with cand.  */
  if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
    {
      machine_mode outer_mode = TYPE_MODE (utype);
      machine_mode inner_mode = TYPE_MODE (ctype);
      cost += comp_cost (convert_cost (outer_mode, inner_mode, speed), 0);
    }

  /* Turn a + i * (-c) into a - i * c.  */
  if (ratio < 0 && comp_inv && !integer_zerop (comp_inv))
    aratio = -ratio;
  else
    aratio = ratio;

  if (ratio != 1)
    cost += mult_by_coeff_cost (aratio, TYPE_MODE (utype), speed);

  /* TODO: We may also need to check if we can compute a + i * 4 in one
     instruction.  */
  /* Need to add up the invariant and variant parts.  */
  if (comp_inv && !integer_zerop (comp_inv))
    cost += add_cost (speed, TYPE_MODE (utype));

  return get_scaled_computation_cost_at (data, at, cost);
}

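/* Illustrative sketch (added commentary, not from the original source):
   expressing use = ub + 4 * i from candidate cand = cb + i gives ratio = 4,
   so the rewritten use is (ub - 4 * cb) + 4 * cand.  The cost charged above
   is then force_var_cost of the invariant part (ub - 4 * cb), plus
   mult_by_coeff_cost for the multiplication by 4, plus one add_cost to
   combine the invariant and variant parts.  */
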
/* Determines cost of computing the use in GROUP with CAND in a generic
   expression.  */

static bool
determine_group_iv_cost_generic (struct ivopts_data *data,
				 struct iv_group *group, struct iv_cand *cand)
{
  comp_cost cost;
  iv_inv_expr_ent *inv_expr = NULL;
  bitmap inv_vars = NULL, inv_exprs = NULL;
  struct iv_use *use = group->vuses[0];

  /* The simple case first -- if we need to express the value of the
     preserved original biv, the cost is 0.  This also prevents us from
     counting the cost of the increment twice -- once at this use and once
     in the cost of the candidate.  */
  if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
    cost = no_cost;
  else
    cost = get_computation_cost (data, use, cand, false,
				 &inv_vars, NULL, &inv_expr);

  if (inv_expr)
    {
      inv_exprs = BITMAP_ALLOC (NULL);
      bitmap_set_bit (inv_exprs, inv_expr->id);
    }
  set_group_iv_cost (data, group, cand, cost, inv_vars,
		     NULL_TREE, ERROR_MARK, inv_exprs);
  return !cost.infinite_cost_p ();
}

/* Determines cost of computing uses in GROUP with CAND in addresses.  */

static bool
determine_group_iv_cost_address (struct ivopts_data *data,
				 struct iv_group *group, struct iv_cand *cand)
{
  unsigned i;
  bitmap inv_vars = NULL, inv_exprs = NULL;
  bool can_autoinc;
  iv_inv_expr_ent *inv_expr = NULL;
  struct iv_use *use = group->vuses[0];
  comp_cost sum_cost = no_cost, cost;

  cost = get_computation_cost (data, use, cand, true,
			       &inv_vars, &can_autoinc, &inv_expr);

  if (inv_expr)
    {
      inv_exprs = BITMAP_ALLOC (NULL);
      bitmap_set_bit (inv_exprs, inv_expr->id);
    }
  sum_cost = cost;
  if (!sum_cost.infinite_cost_p () && cand->ainc_use == use)
    {
      if (can_autoinc)
	sum_cost -= cand->cost_step;
      /* If we generated the candidate solely for exploiting autoincrement
	 opportunities, and it turns out it can't be used, set the cost to
	 infinity to make sure we ignore it.  */
      else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
	sum_cost = infinite_cost;
    }

  /* Uses in a group can share setup code, so only add setup cost once.  */
  cost -= cost.scratch;
  /* Compute and add costs for the remaining uses of this group.  */
  for (i = 1; i < group->vuses.length () && !sum_cost.infinite_cost_p (); i++)
    {
      struct iv_use *next = group->vuses[i];

      /* TODO: We could skip computing cost for sub iv_use when it has the
	 same cost as the first iv_use, but the cost really depends on the
	 offset and where the iv_use is.  */
      cost = get_computation_cost (data, next, cand, true,
				   NULL, &can_autoinc, &inv_expr);
      if (inv_expr)
	{
	  if (!inv_exprs)
	    inv_exprs = BITMAP_ALLOC (NULL);

	  bitmap_set_bit (inv_exprs, inv_expr->id);
	}
      sum_cost += cost;
    }
  set_group_iv_cost (data, group, cand, sum_cost, inv_vars,
		     NULL_TREE, ERROR_MARK, inv_exprs);

  return !sum_cost.infinite_cost_p ();
}

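/* Illustrative note (added commentary, not from the original source): for a
   group of uses a[i], a[i + 4] and a[i + 8], the scratch field of a use's
   cost records the shared setup work, e.g. forcing &a[0] into a register.
   Since the uses of a group differ only in constant offset, the intent of
   the code above is to charge that setup once for the whole group rather
   than once per use.  */
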
/* Computes value of candidate CAND at position AT in iteration NITER, and
   stores it to VAL.  */

static void
cand_value_at (struct loop *loop, struct iv_cand *cand, gimple *at, tree niter,
	       aff_tree *val)
{
  aff_tree step, delta, nit;
  struct iv *iv = cand->iv;
  tree type = TREE_TYPE (iv->base);
  tree steptype;
  if (POINTER_TYPE_P (type))
    steptype = sizetype;
  else
    steptype = unsigned_type_for (type);

  tree_to_aff_combination (iv->step, TREE_TYPE (iv->step), &step);
  aff_combination_convert (&step, steptype);
  tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
  aff_combination_convert (&nit, steptype);
  aff_combination_mult (&nit, &step, &delta);
  if (stmt_after_increment (loop, cand, at))
    aff_combination_add (&delta, &step);

  tree_to_aff_combination (iv->base, type, val);
  if (!POINTER_TYPE_P (type))
    aff_combination_convert (val, steptype);
  aff_combination_add (val, &delta);
}

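/* Worked example (added commentary, not from the original source): for a
   candidate with base 16 and step 4, the value computed here is
   16 + 4 * NITER when AT executes before the increment, and
   16 + 4 * (NITER + 1) after it -- hence the extra aff_combination_add of
   STEP above.  */
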
/* Returns period of induction variable iv.  */

static tree
iv_period (struct iv *iv)
{
  tree step = iv->step, period, type;
  tree pow2div;

  gcc_assert (step && TREE_CODE (step) == INTEGER_CST);

  type = unsigned_type_for (TREE_TYPE (step));
  /* Period of the iv is lcm (step, type_range) / step - 1,
     i.e., N * type_range / step - 1.  Since type range is a power
     of two, N == (step >> num_of_ending_zeros_binary (step)),
     so the final result is

       (type_range >> num_of_ending_zeros_binary (step)) - 1.  */
  pow2div = num_ending_zeros (step);

  period = build_low_bits_mask (type,
				(TYPE_PRECISION (type)
				 - tree_to_uhwi (pow2div)));

  return period;
}

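/* Worked example (added commentary, not from the original source): for a
   32-bit unsigned iv with step 12 (two trailing zero bits), pow2div = 2 and
   the period is the low-bits mask of 32 - 2 = 30 bits, i.e. 0x3fffffff:
   only after that many increments can the iv return to a previously seen
   value.  */
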
/* Returns the comparison operator used when eliminating the iv USE.  */

static enum tree_code
iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
{
  struct loop *loop = data->current_loop;
  basic_block ex_bb;
  edge exit;

  ex_bb = gimple_bb (use->stmt);
  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);

  return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
}

/* Returns true if we can prove that BASE - OFFSET does not overflow.  For
   now, we only detect the situation that BASE = SOMETHING + OFFSET, where
   the calculation is performed in a non-wrapping type.

   TODO: More generally, we could test for the situation that
	 BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
	 This would require knowing the sign of OFFSET.  */

static bool
difference_cannot_overflow_p (struct ivopts_data *data, tree base, tree offset)
{
  enum tree_code code;
  tree e1, e2;
  aff_tree aff_e1, aff_e2, aff_offset;

  if (!nowrap_type_p (TREE_TYPE (base)))
    return false;

  base = expand_simple_operations (base);

  if (TREE_CODE (base) == SSA_NAME)
    {
      gimple *stmt = SSA_NAME_DEF_STMT (base);

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
	return false;

      code = gimple_assign_rhs_code (stmt);
      if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
	return false;

      e1 = gimple_assign_rhs1 (stmt);
      e2 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      code = TREE_CODE (base);
      if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
	return false;
      e1 = TREE_OPERAND (base, 0);
      e2 = TREE_OPERAND (base, 1);
    }

  /* Use affine expansion as deeper inspection to prove the equality.  */
  tree_to_aff_combination_expand (e2, TREE_TYPE (e2),
				  &aff_e2, &data->name_expansion_cache);
  tree_to_aff_combination_expand (offset, TREE_TYPE (offset),
				  &aff_offset, &data->name_expansion_cache);
  aff_combination_scale (&aff_offset, -1);
  switch (code)
    {
    case PLUS_EXPR:
      aff_combination_add (&aff_e2, &aff_offset);
      if (aff_combination_zero_p (&aff_e2))
	return true;

      tree_to_aff_combination_expand (e1, TREE_TYPE (e1),
				      &aff_e1, &data->name_expansion_cache);
      aff_combination_add (&aff_e1, &aff_offset);
      return aff_combination_zero_p (&aff_e1);

    case POINTER_PLUS_EXPR:
      aff_combination_add (&aff_e2, &aff_offset);
      return aff_combination_zero_p (&aff_e2);

    default:
      return false;
    }
}

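/* Illustrative example (added commentary, not from the original source):
   for BASE defined as base0 + a in a non-wrapping type and OFFSET = a, the
   PLUS_EXPR case expands e2 = a and OFFSET into affine form; their
   difference cancels to zero, proving that BASE - OFFSET = base0 cannot
   overflow.  */
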
/* Tries to replace the loop exit by one formulated in terms of a LT_EXPR
   comparison with CAND.  NITER describes the number of iterations of
   the loop.  If successful, the comparison in COMP_P is altered accordingly.

   We aim to handle the following situation:

   sometype *base, *p;
   int a, b, i;

   i = a;
   p = p_0 = base + a;

   do
     {
       bla (*p);
       p++;
       i++;
     }
   while (i < b);

   Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
   We aim to optimize this to

   p = p_0 = base + a;
   do
     {
       bla (*p);
       p++;
     }
   while (p < p_0 - a + b);

   This preserves the correctness, since the pointer arithmetic does not
   overflow.  More precisely:

   1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there
      is no overflow in computing it or the values of p.
   2) if a + 1 > b, then we need to verify that the expression p_0 - a does
      not overflow.  To prove this, we use the fact that p_0 = base + a.  */

static bool
iv_elimination_compare_lt (struct ivopts_data *data,
			   struct iv_cand *cand, enum tree_code *comp_p,
			   struct tree_niter_desc *niter)
{
  tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
  aff_tree nit, tmpa, tmpb;
  enum tree_code comp;
  HOST_WIDE_INT step;

  /* We need to know that the candidate induction variable does not overflow.
     While more complex analysis may be used to prove this, for now just
     check that the variable appears in the original program and that it
     is computed in a type that guarantees no overflows.  */
  cand_type = TREE_TYPE (cand->iv->base);
  if (cand->pos != IP_ORIGINAL || !nowrap_type_p (cand_type))
    return false;

  /* Make sure that the loop iterates till the loop bound is hit, as
     otherwise the calculation of the BOUND could overflow, making the
     comparison invalid.  */
  if (!data->loop_single_exit_p)
    return false;

  /* We need to be able to decide whether the candidate is increasing or
     decreasing in order to choose the right comparison operator.  */
  if (!cst_and_fits_in_hwi (cand->iv->step))
    return false;
  step = int_cst_value (cand->iv->step);

  /* Check that the number of iterations matches the expected pattern:
     a + 1 > b ? 0 : b - a - 1.  */
  mbz = niter->may_be_zero;
  if (TREE_CODE (mbz) == GT_EXPR)
    {
      /* Handle a + 1 > b.  */
      tree op0 = TREE_OPERAND (mbz, 0);
      if (TREE_CODE (op0) == PLUS_EXPR && integer_onep (TREE_OPERAND (op0, 1)))
	{
	  a = TREE_OPERAND (op0, 0);
	  b = TREE_OPERAND (mbz, 1);
	}
      else
	return false;
    }
  else if (TREE_CODE (mbz) == LT_EXPR)
    {
      tree op1 = TREE_OPERAND (mbz, 1);

      /* Handle b < a + 1.  */
      if (TREE_CODE (op1) == PLUS_EXPR && integer_onep (TREE_OPERAND (op1, 1)))
	{
	  a = TREE_OPERAND (op1, 0);
	  b = TREE_OPERAND (mbz, 0);
	}
      else
	return false;
    }
  else
    return false;

  /* Expected number of iterations is B - A - 1.  Check that it matches
     the actual number, i.e., that B - A - NITER = 1.  */
  tree_to_aff_combination (niter->niter, nit_type, &nit);
  tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
  tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
  aff_combination_scale (&nit, -1);
  aff_combination_scale (&tmpa, -1);
  aff_combination_add (&tmpb, &tmpa);
  aff_combination_add (&tmpb, &nit);
  if (tmpb.n != 0 || tmpb.offset != 1)
    return false;

  /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
     overflow.  */
  offset = fold_build2 (MULT_EXPR, TREE_TYPE (cand->iv->step),
			cand->iv->step,
			fold_convert (TREE_TYPE (cand->iv->step), a));
  if (!difference_cannot_overflow_p (data, cand->iv->base, offset))
    return false;

  /* Determine the new comparison operator.  */
  comp = step < 0 ? GT_EXPR : LT_EXPR;
  if (*comp_p == NE_EXPR)
    *comp_p = comp;
  else if (*comp_p == EQ_EXPR)
    *comp_p = invert_tree_comparison (comp, false);
  else
    gcc_unreachable ();

  return true;
}

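/* Worked check (added commentary, not from the original source): with a = 2
   and b = 7, MAY_BE_ZERO has the shape 2 + 1 > 7 and NITER is
   b - a - 1 = 4.  The affine sum B - A - NITER computed above is
   7 - 2 - 4 = 1, i.e. no variable part and offset 1, so the expected
   pattern is recognized.  */
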
/* Check whether it is possible to express the condition in USE by a
   comparison of candidate CAND.  If so, store the value compared with in
   BOUND, and the comparison operator in COMP.  */

static bool
may_eliminate_iv (struct ivopts_data *data,
		  struct iv_use *use, struct iv_cand *cand, tree *bound,
		  enum tree_code *comp)
{
  basic_block ex_bb;
  edge exit;
  tree period;
  struct loop *loop = data->current_loop;
  aff_tree bnd;
  struct tree_niter_desc *desc = NULL;

  if (TREE_CODE (cand->iv->step) != INTEGER_CST)
    return false;

  /* For now works only for exits that dominate the loop latch.
     TODO: extend to other conditions inside the loop body.  */
  ex_bb = gimple_bb (use->stmt);
  if (use->stmt != last_stmt (ex_bb)
      || gimple_code (use->stmt) != GIMPLE_COND
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
    return false;

  exit = EDGE_SUCC (ex_bb, 0);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    exit = EDGE_SUCC (ex_bb, 1);
  if (flow_bb_inside_loop_p (loop, exit->dest))
    return false;

  desc = niter_for_exit (data, exit);
  if (!desc)
    return false;

  /* Determine whether we can use the variable to test the exit condition.
     This is the case iff the period of the induction variable is greater
     than the number of iterations for which the exit condition is true.  */
  period = iv_period (cand->iv);

  /* If the number of iterations is constant, compare against it directly.  */
  if (TREE_CODE (desc->niter) == INTEGER_CST)
    {
      /* See cand_value_at.  */
      if (stmt_after_increment (loop, cand, use->stmt))
	{
	  if (!tree_int_cst_lt (desc->niter, period))
	    return false;
	}
      else
	{
	  if (tree_int_cst_lt (period, desc->niter))
	    return false;
	}
    }

  /* If not, and if this is the only possible exit of the loop, see whether
     we can get a conservative estimate on the number of iterations of the
     entire loop and compare against that instead.  */
  else
    {
      widest_int period_value, max_niter;

      max_niter = desc->max;
      if (stmt_after_increment (loop, cand, use->stmt))
	max_niter += 1;
      period_value = wi::to_widest (period);
      if (wi::gtu_p (max_niter, period_value))
	{
	  /* See if we can take advantage of inferred loop bound
	     information.  */
	  if (data->loop_single_exit_p)
	    {
	      if (!max_loop_iterations (loop, &max_niter))
		return false;
	      /* The loop bound is already adjusted by adding 1.  */
	      if (wi::gtu_p (max_niter, period_value))
		return false;
	    }
	  else
	    return false;
	}
    }

  cand_value_at (loop, cand, use->stmt, desc->niter, &bnd);

  *bound = fold_convert (TREE_TYPE (cand->iv->base),
			 aff_combination_to_tree (&bnd));
  *comp = iv_elimination_compare (data, use);

  /* It is unlikely that computing the number of iterations using division
     would be more profitable than keeping the original induction
     variable.  */
  if (expression_expensive_p (*bound))
    return false;

  /* Sometimes, it is possible to handle the situation that the number of
     iterations may be zero unless additional assumptions hold, by using <
     instead of != in the exit condition.

     TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
	   base the exit condition on it.  However, that is often too
	   expensive.  */
  if (!integer_zerop (desc->may_be_zero))
    return iv_elimination_compare_lt (data, cand, comp, desc);

  return true;
}

/* Calculates the cost of BOUND, if it is a PARM_DECL.  A PARM_DECL must
   be copied, if it is used in the loop body and DATA->body_includes_call.  */

static int
parm_decl_cost (struct ivopts_data *data, tree bound)
{
  tree sbound = bound;
  STRIP_NOPS (sbound);

  if (TREE_CODE (sbound) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (sbound)
      && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
      && data->body_includes_call)
    return COSTS_N_INSNS (1);

  return 0;
}

/* Determines cost of computing the use in GROUP with CAND in a condition.  */

static bool
determine_group_iv_cost_cond (struct ivopts_data *data,
			      struct iv_group *group, struct iv_cand *cand)
{
  tree bound = NULL_TREE;
  struct iv *cmp_iv;
  bitmap inv_exprs = NULL;
  bitmap inv_vars_elim = NULL, inv_vars_express = NULL, inv_vars;
  comp_cost elim_cost = infinite_cost, express_cost, cost, bound_cost;
  enum comp_iv_rewrite rewrite_type;
  iv_inv_expr_ent *inv_expr_elim = NULL, *inv_expr_express = NULL, *inv_expr;
  tree *control_var, *bound_cst;
  enum tree_code comp = ERROR_MARK;
  struct iv_use *use = group->vuses[0];

  /* Extract condition operands.  */
  rewrite_type = extract_cond_operands (data, use->stmt, &control_var,
					&bound_cst, NULL, &cmp_iv);
  gcc_assert (rewrite_type != COMP_IV_NA);

  /* Try iv elimination.  */
  if (rewrite_type == COMP_IV_ELIM
      && may_eliminate_iv (data, use, cand, &bound, &comp))
    {
      elim_cost = force_var_cost (data, bound, &inv_vars_elim);
      if (elim_cost.cost == 0)
	elim_cost.cost = parm_decl_cost (data, bound);
      else if (TREE_CODE (bound) == INTEGER_CST)
	elim_cost.cost = 0;
      /* If we replace a loop condition 'i < n' with 'p < base + n',
	 inv_vars_elim will have 'base' and 'n' set, which implies that both
	 'base' and 'n' will be live during the loop.  More likely,
	 'base + n' will be loop invariant, resulting in only one live value
	 during the loop.  So in that case we clear inv_vars_elim and set
	 inv_expr_elim instead.  */
      if (inv_vars_elim && bitmap_count_bits (inv_vars_elim) > 1)
	{
	  inv_expr_elim = get_loop_invariant_expr (data, bound);
	  bitmap_clear (inv_vars_elim);
	}
      /* The bound is a loop invariant, so it will be only computed
	 once.  */
      elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
    }

  /* When the condition is a comparison of the candidate IV against
     zero, prefer this IV.

     TODO: The constant that we're subtracting from the cost should
     be target-dependent.  This information should be added to the
     target costs for each backend.  */
  if (!elim_cost.infinite_cost_p () /* Do not try to decrease infinite!  */
      && integer_zerop (*bound_cst)
      && (operand_equal_p (*control_var, cand->var_after, 0)
	  || operand_equal_p (*control_var, cand->var_before, 0)))
    elim_cost -= 1;

  express_cost = get_computation_cost (data, use, cand, false,
				       &inv_vars_express, NULL,
				       &inv_expr_express);
  if (cmp_iv != NULL)
    find_inv_vars (data, &cmp_iv->base, &inv_vars_express);

  /* Count the cost of the original bound as well.  */
  bound_cost = force_var_cost (data, *bound_cst, NULL);
  if (bound_cost.cost == 0)
    bound_cost.cost = parm_decl_cost (data, *bound_cst);
  else if (TREE_CODE (*bound_cst) == INTEGER_CST)
    bound_cost.cost = 0;
  express_cost += bound_cost;

  /* Choose the better approach, preferring the eliminated IV.  */
  if (elim_cost <= express_cost)
    {
      cost = elim_cost;
      inv_vars = inv_vars_elim;
      inv_vars_elim = NULL;
      inv_expr = inv_expr_elim;
    }
  else
    {
      cost = express_cost;
      inv_vars = inv_vars_express;
      inv_vars_express = NULL;
      bound = NULL_TREE;
      comp = ERROR_MARK;
      inv_expr = inv_expr_express;
    }

  if (inv_expr)
    {
      inv_exprs = BITMAP_ALLOC (NULL);
      bitmap_set_bit (inv_exprs, inv_expr->id);
    }
  set_group_iv_cost (data, group, cand, cost,
		     inv_vars, bound, comp, inv_exprs);

  if (inv_vars_elim)
    BITMAP_FREE (inv_vars_elim);
  if (inv_vars_express)
    BITMAP_FREE (inv_vars_express);

  return !cost.infinite_cost_p ();
}

/* Determines cost of computing uses in GROUP with CAND.  Returns false
   if USE cannot be represented with CAND.  */

static bool
determine_group_iv_cost (struct ivopts_data *data,
			 struct iv_group *group, struct iv_cand *cand)
{
  switch (group->type)
    {
    case USE_NONLINEAR_EXPR:
      return determine_group_iv_cost_generic (data, group, cand);

    case USE_ADDRESS:
      return determine_group_iv_cost_address (data, group, cand);

    case USE_COMPARE:
      return determine_group_iv_cost_cond (data, group, cand);

    default:
      gcc_unreachable ();
    }
}

/* Return true if get_computation_cost indicates that autoincrement is
   a possibility for the pair of USE and CAND, false otherwise.  */

static bool
autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
			   struct iv_cand *cand)
{
  if (use->type != USE_ADDRESS)
    return false;

  bool can_autoinc = false;
  get_computation_cost (data, use, cand, true, NULL, &can_autoinc, NULL);
  return can_autoinc;
}

/* Examine IP_ORIGINAL candidates to see if they are incremented next to a
   use that allows autoincrement, and set their AINC_USE if possible.  */

static void
set_autoinc_for_original_candidates (struct ivopts_data *data)
{
  unsigned i, j;

  for (i = 0; i < data->vcands.length (); i++)
    {
      struct iv_cand *cand = data->vcands[i];
      struct iv_use *closest_before = NULL;
      struct iv_use *closest_after = NULL;
      if (cand->pos != IP_ORIGINAL)
	continue;

      for (j = 0; j < data->vgroups.length (); j++)
	{
	  struct iv_group *group = data->vgroups[j];
	  struct iv_use *use = group->vuses[0];
	  unsigned uid = gimple_uid (use->stmt);

	  if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at))
	    continue;

	  if (uid < gimple_uid (cand->incremented_at)
	      && (closest_before == NULL
		  || uid > gimple_uid (closest_before->stmt)))
	    closest_before = use;

	  if (uid > gimple_uid (cand->incremented_at)
	      && (closest_after == NULL
		  || uid < gimple_uid (closest_after->stmt)))
	    closest_after = use;
	}

      if (closest_before != NULL
	  && autoinc_possible_for_pair (data, closest_before, cand))
	cand->ainc_use = closest_before;
      else if (closest_after != NULL
	       && autoinc_possible_for_pair (data, closest_after, cand))
	cand->ainc_use = closest_after;
    }
}

/* Relate compare use with all candidates.  */

static void
relate_compare_use_with_all_cands (struct ivopts_data *data)
{
  unsigned i, count = data->vcands.length ();
  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];

      if (group->type == USE_COMPARE)
	bitmap_set_range (group->related_cands, 0, count);
    }
}

/* Finds the candidates for the induction variables.  */

static void
find_iv_candidates (struct ivopts_data *data)
{
  /* Add commonly used ivs.  */
  add_standard_iv_candidates (data);

  /* Add old induction variables.  */
  add_iv_candidate_for_bivs (data);

  /* Add induction variables derived from uses.  */
  add_iv_candidate_for_groups (data);

  set_autoinc_for_original_candidates (data);

  /* Record the important candidates.  */
  record_important_candidates (data);

  /* Relate compare iv_use with all candidates.  */
  if (!data->consider_all_candidates)
    relate_compare_use_with_all_cands (data);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;

      fprintf (dump_file, "\n<Important Candidates>:\t");
      for (i = 0; i < data->vcands.length (); i++)
	if (data->vcands[i]->important)
	  fprintf (dump_file, " %d,", data->vcands[i]->id);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "\n<Group, Cand> Related:\n");
      for (i = 0; i < data->vgroups.length (); i++)
	{
	  struct iv_group *group = data->vgroups[i];

	  if (group->related_cands)
	    {
	      fprintf (dump_file, " Group %d:\t", group->id);
	      dump_bitmap (dump_file, group->related_cands);
	    }
	}
      fprintf (dump_file, "\n");
    }
}

/* Determines costs of computing use of iv with an iv candidate.  */

static void
determine_group_iv_costs (struct ivopts_data *data)
{
  unsigned i, j;
  struct iv_cand *cand;
  struct iv_group *group;
  bitmap to_clear = BITMAP_ALLOC (NULL);

  alloc_use_cost_map (data);

  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];

      if (data->consider_all_candidates)
	{
	  for (j = 0; j < data->vcands.length (); j++)
	    {
	      cand = data->vcands[j];
	      determine_group_iv_cost (data, group, cand);
	    }
	}
      else
	{
	  bitmap_iterator bi;

	  EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, j, bi)
	    {
	      cand = data->vcands[j];
	      if (!determine_group_iv_cost (data, group, cand))
		bitmap_set_bit (to_clear, j);
	    }

	  /* Remove the candidates for which the cost is infinite from
	     the list of related candidates.  */
	  bitmap_and_compl_into (group->related_cands, to_clear);
	  bitmap_clear (to_clear);
	}
    }

  BITMAP_FREE (to_clear);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bitmap_iterator bi;

      /* Dump invariant variables.  */
      fprintf (dump_file, "\n<Invariant Vars>:\n");
      EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
	{
	  struct version_info *info = ver_info (data, i);
	  if (info->inv_id)
	    {
	      fprintf (dump_file, "Inv %d:\t", info->inv_id);
	      print_generic_expr (dump_file, info->name, TDF_SLIM);
	      fprintf (dump_file, "%s\n",
		       info->has_nonlin_use ? "" : "\t(eliminable)");
	    }
	}

      /* Dump invariant expressions.  */
      fprintf (dump_file, "\n<Invariant Expressions>:\n");
      auto_vec <iv_inv_expr_ent *> list (data->inv_expr_tab->elements ());

      for (hash_table<iv_inv_expr_hasher>::iterator it
	   = data->inv_expr_tab->begin (); it != data->inv_expr_tab->end ();
	   ++it)
	list.safe_push (*it);

      list.qsort (sort_iv_inv_expr_ent);

      for (i = 0; i < list.length (); ++i)
	{
	  fprintf (dump_file, "inv_expr %d: \t", list[i]->id);
	  print_generic_expr (dump_file, list[i]->expr, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}

      fprintf (dump_file, "\n<Group-candidate Costs>:\n");

      for (i = 0; i < data->vgroups.length (); i++)
	{
	  group = data->vgroups[i];

	  fprintf (dump_file, "Group %d:\n", i);
	  fprintf (dump_file, "  cand\tcost\tcompl.\tinv.expr.\tinv.vars\n");
	  for (j = 0; j < group->n_map_members; j++)
	    {
	      if (!group->cost_map[j].cand
		  || group->cost_map[j].cost.infinite_cost_p ())
		continue;

	      fprintf (dump_file, "  %d\t%d\t%d\t",
		       group->cost_map[j].cand->id,
		       group->cost_map[j].cost.cost,
		       group->cost_map[j].cost.complexity);
	      if (!group->cost_map[j].inv_exprs
		  || bitmap_empty_p (group->cost_map[j].inv_exprs))
		fprintf (dump_file, "NIL;\t");
	      else
		bitmap_print (dump_file,
			      group->cost_map[j].inv_exprs, "", ";\t");
	      if (!group->cost_map[j].inv_vars
		  || bitmap_empty_p (group->cost_map[j].inv_vars))
		fprintf (dump_file, "NIL;\n");
	      else
		bitmap_print (dump_file,
			      group->cost_map[j].inv_vars, "", "\n");
	    }

	  fprintf (dump_file, "\n");
	}
      fprintf (dump_file, "\n");
    }
}

/* Determines cost of the candidate CAND.  */

static void
determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
{
  comp_cost cost_base;
  unsigned cost, cost_step;
  tree base;

  gcc_assert (cand->iv != NULL);

  /* There are two costs associated with the candidate -- its increment
     and its initialization.  The second is almost negligible for any loop
     that rolls enough, so we take it just very little into account.  */

  base = cand->iv->base;
  cost_base = force_var_cost (data, base, NULL);
  /* It will be exceptional that the iv register happens to be initialized
     with the proper value at no cost.  In general, there will at least be
     a regcopy or a constant set.  */
  if (cost_base.cost == 0)
    cost_base.cost = COSTS_N_INSNS (1);
  cost_step = add_cost (data->speed, TYPE_MODE (TREE_TYPE (base)));

  cost = cost_step + adjust_setup_cost (data, cost_base.cost);

  /* Prefer the original ivs unless we may gain something by replacing it.
     The reason is to make debugging simpler; so this is not relevant for
     artificial ivs created by other optimization passes.  */
  if (cand->pos != IP_ORIGINAL
      || !SSA_NAME_VAR (cand->var_before)
      || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
    cost++;

  /* Prefer not to insert statements into latch unless there are some
     already (so that we do not create unnecessary jumps).  */
  if (cand->pos == IP_END
      && empty_block_p (ip_end_pos (data->current_loop)))
    cost++;

  cand->cost = cost;
  cand->cost_step = cost_step;
}

/* Determines costs of computation of the candidates.  */

static void
determine_iv_costs (struct ivopts_data *data)
{
  unsigned i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "<Candidate Costs>:\n");
      fprintf (dump_file, "  cand\tcost\n");
    }

  for (i = 0; i < data->vcands.length (); i++)
    {
      struct iv_cand *cand = data->vcands[i];

      determine_iv_cost (data, cand);

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  %d\t%d\n", i, cand->cost);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n");
}

/* Estimate register pressure for a loop having N_INVS invariants and
   N_CANDS induction variables.  Note N_INVS includes both invariant
   variables and invariant expressions.  */

static unsigned
ivopts_estimate_reg_pressure (struct ivopts_data *data, unsigned n_invs,
			      unsigned n_cands)
{
  unsigned cost;
  unsigned n_old = data->regs_used, n_new = n_invs + n_cands;
  unsigned regs_needed = n_new + n_old, available_regs = target_avail_regs;
  bool speed = data->speed;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (data->body_includes_call)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers.  */
  if (regs_needed + target_res_regs < available_regs)
    cost = n_new;
  /* If close to running out of registers, try to preserve them.  */
  else if (regs_needed <= available_regs)
    cost = target_reg_cost [speed] * regs_needed;
  /* If we run out of available registers but the number of candidates
     does not, we penalize extra registers using target_spill_cost.  */
  else if (n_cands <= available_regs)
    cost = target_reg_cost [speed] * available_regs
	   + target_spill_cost [speed] * (regs_needed - available_regs);
  /* If even the number of candidates exceeds the available registers, we
     penalize the extra candidate registers using target_spill_cost * 2,
     because it is more expensive to spill an induction variable than an
     invariant.  */
  else
    cost = target_reg_cost [speed] * available_regs
	   + target_spill_cost [speed] * (n_cands - available_regs) * 2
	   + target_spill_cost [speed] * (regs_needed - n_cands);

  /* Finally, add the number of candidates, so that we prefer eliminating
     induction variables if possible.  */
  return cost + n_cands;
}

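/* Worked example (added commentary, not from the original source): with 20
   available registers, n_old = 4, n_cands = 10 and n_invs = 12,
   regs_needed = 26 exceeds the available registers while n_cands does not,
   so the third branch applies: cost = target_reg_cost * 20
   + target_spill_cost * 6, and the final return adds n_cands = 10 to bias
   the search towards fewer induction variables.  */
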
/* For each size of the induction variable set determine the penalty.  */

static void
determine_set_costs (struct ivopts_data *data)
{
  unsigned j, n;
  gphi *phi;
  gphi_iterator psi;
  tree op;
  struct loop *loop = data->current_loop;
  bitmap_iterator bi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "<Global Costs>:\n");
      fprintf (dump_file, "  target_avail_regs %d\n", target_avail_regs);
      fprintf (dump_file, "  target_clobbered_regs %d\n", target_clobbered_regs);
      fprintf (dump_file, "  target_reg_cost %d\n", target_reg_cost[data->speed]);
      fprintf (dump_file, "  target_spill_cost %d\n", target_spill_cost[data->speed]);
    }

  n = 0;
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      op = PHI_RESULT (phi);

      if (virtual_operand_p (op))
	continue;

      if (get_iv (data, op))
	continue;

      if (!POINTER_TYPE_P (TREE_TYPE (op))
	  && !INTEGRAL_TYPE_P (TREE_TYPE (op)))
	continue;

      n++;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info = ver_info (data, j);

      if (info->inv_id && info->has_nonlin_use)
	n++;
    }

  data->regs_used = n;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  regs_used %d\n", n);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  cost for size:\n");
      fprintf (dump_file, "  ivs\tcost\n");
      for (j = 0; j <= 2 * target_avail_regs; j++)
	fprintf (dump_file, "  %d\t%d\n", j,
		 ivopts_estimate_reg_pressure (data, 0, j));
      fprintf (dump_file, "\n");
    }
}

/* Returns true if A is a cheaper cost pair than B.  */

static bool
cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
{
  if (!a)
    return false;

  if (!b)
    return true;

  if (a->cost < b->cost)
    return true;

  if (b->cost < a->cost)
    return false;

  /* In case the costs are the same, prefer the cheaper candidate.  */
  if (a->cand->cost < b->cand->cost)
    return true;

  return false;
}

/* Compare if A is a more expensive cost pair than B.  Return 1, 0 and -1
   for more expensive, equal and cheaper respectively.  */

static int
compare_cost_pair (struct cost_pair *a, struct cost_pair *b)
{
  if (cheaper_cost_pair (a, b))
    return -1;
  if (cheaper_cost_pair (b, a))
    return 1;

  return 0;
}

/* Returns the candidate by which USE is expressed in IVS.  */

static struct cost_pair *
iv_ca_cand_for_group (struct iv_ca *ivs, struct iv_group *group)
{
  return ivs->cand_for_group[group->id];
}

/* Computes the cost field of IVS structure.  */

static void
iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
{
  comp_cost cost = ivs->cand_use_cost;

  cost += ivs->cand_cost;
  cost += ivopts_estimate_reg_pressure (data, ivs->n_invs, ivs->n_cands);
  ivs->cost = cost;
}

/* Remove use of invariants in set INVS by decreasing counters in N_INV_USES
   and updating IVS.  */

static void
iv_ca_set_remove_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  gcc_assert (n_inv_uses != NULL);
  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      n_inv_uses[iid]--;
      if (n_inv_uses[iid] == 0)
	ivs->n_invs--;
    }
}

/* Set USE not to be expressed by any candidate in IVS.  */

static void
iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
		 struct iv_group *group)
{
  unsigned gid = group->id, cid;
  struct cost_pair *cp;

  cp = ivs->cand_for_group[gid];
  if (!cp)
    return;
  cid = cp->cand->id;

  ivs->bad_groups++;
  ivs->cand_for_group[gid] = NULL;
  ivs->n_cand_uses[cid]--;

  if (ivs->n_cand_uses[cid] == 0)
    {
      bitmap_clear_bit (ivs->cands, cid);
      ivs->n_cands--;
      ivs->cand_cost -= cp->cand->cost;
      iv_ca_set_remove_invs (ivs, cp->cand->inv_vars, ivs->n_inv_var_uses);
      iv_ca_set_remove_invs (ivs, cp->cand->inv_exprs, ivs->n_inv_expr_uses);
    }

  ivs->cand_use_cost -= cp->cost;
  iv_ca_set_remove_invs (ivs, cp->inv_vars, ivs->n_inv_var_uses);
  iv_ca_set_remove_invs (ivs, cp->inv_exprs, ivs->n_inv_expr_uses);
  iv_ca_recount_cost (data, ivs);
}

/* Add use of invariants in set INVS by increasing counters in N_INV_USES
   and updating IVS.  */

static void
iv_ca_set_add_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
{
  bitmap_iterator bi;
  unsigned iid;

  if (!invs)
    return;

  gcc_assert (n_inv_uses != NULL);
  EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
    {
      n_inv_uses[iid]++;
      if (n_inv_uses[iid] == 1)
	ivs->n_invs++;
    }
}

/* Set cost pair for GROUP in set IVS to CP.  */

static void
iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_group *group, struct cost_pair *cp)
{
  unsigned gid = group->id, cid;

  if (ivs->cand_for_group[gid] == cp)
    return;

  if (ivs->cand_for_group[gid])
    iv_ca_set_no_cp (data, ivs, group);

  if (cp)
    {
      cid = cp->cand->id;

      ivs->bad_groups--;
      ivs->cand_for_group[gid] = cp;
      ivs->n_cand_uses[cid]++;
      if (ivs->n_cand_uses[cid] == 1)
	{
	  bitmap_set_bit (ivs->cands, cid);
	  ivs->n_cands++;
	  ivs->cand_cost += cp->cand->cost;
	  iv_ca_set_add_invs (ivs, cp->cand->inv_vars, ivs->n_inv_var_uses);
	  iv_ca_set_add_invs (ivs, cp->cand->inv_exprs, ivs->n_inv_expr_uses);
	}

      ivs->cand_use_cost += cp->cost;
      iv_ca_set_add_invs (ivs, cp->inv_vars, ivs->n_inv_var_uses);
      iv_ca_set_add_invs (ivs, cp->inv_exprs, ivs->n_inv_expr_uses);
      iv_ca_recount_cost (data, ivs);
    }
}

/* Extend set IVS by expressing USE by some of the candidates in it
   if possible.  Consider all important candidates if candidates in
   set IVS don't give any result.  */

static void
iv_ca_add_group (struct ivopts_data *data, struct iv_ca *ivs,
		 struct iv_group *group)
{
  struct cost_pair *best_cp = NULL, *cp;
  bitmap_iterator bi;
  unsigned i;
  struct iv_cand *cand;

  gcc_assert (ivs->upto >= group->id);
  ivs->upto++;
  ivs->bad_groups++;

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      cand = data->vcands[i];
      cp = get_group_iv_cost (data, group, cand);
      if (cheaper_cost_pair (cp, best_cp))
	best_cp = cp;
    }

  if (best_cp == NULL)
    {
      EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
	{
	  cand = data->vcands[i];
	  cp = get_group_iv_cost (data, group, cand);
	  if (cheaper_cost_pair (cp, best_cp))
	    best_cp = cp;
	}
    }

  iv_ca_set_cp (data, ivs, group, best_cp);
}

/* Get cost for assignment IVS.  */

static comp_cost
iv_ca_cost (struct iv_ca *ivs)
{
  /* This was a conditional expression but it triggered a bug in
     Sun C 5.5.  */
  if (ivs->bad_groups)
    return infinite_cost;
  else
    return ivs->cost;
}

/* Compare if applying NEW_CP to GROUP for IVS introduces more invariants
   than OLD_CP.  Return 1, 0 and -1 for more, equal and fewer invariants
   respectively.  */

static int
iv_ca_compare_deps (struct ivopts_data *data, struct iv_ca *ivs,
		    struct iv_group *group, struct cost_pair *old_cp,
		    struct cost_pair *new_cp)
{
  gcc_assert (old_cp && new_cp && old_cp != new_cp);
  unsigned old_n_invs = ivs->n_invs;
  iv_ca_set_cp (data, ivs, group, new_cp);
  unsigned new_n_invs = ivs->n_invs;
  iv_ca_set_cp (data, ivs, group, old_cp);

  return new_n_invs > old_n_invs ? 1 : (new_n_invs < old_n_invs ? -1 : 0);
}

/* Creates a change of expressing GROUP by NEW_CP instead of OLD_CP and
   chains it before NEXT.  */

static struct iv_ca_delta *
iv_ca_delta_add (struct iv_group *group, struct cost_pair *old_cp,
		 struct cost_pair *new_cp, struct iv_ca_delta *next)
{
  struct iv_ca_delta *change = XNEW (struct iv_ca_delta);

  change->group = group;
  change->old_cp = old_cp;
  change->new_cp = new_cp;
  change->next = next;

  return change;
}

/* Joins two lists of changes L1 and L2.  Destructive -- the old lists
   are rewritten.  */

static struct iv_ca_delta *
iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
{
  struct iv_ca_delta *last;

  if (!l2)
    return l1;

  if (!l1)
    return l2;

  for (last = l1; last->next; last = last->next)
    continue;
  last->next = l2;

  return l1;
}

5921 static struct iv_ca_delta
*
5922 iv_ca_delta_reverse (struct iv_ca_delta
*delta
)
5924 struct iv_ca_delta
*act
, *next
, *prev
= NULL
;
5926 for (act
= delta
; act
; act
= next
)
5932 std::swap (act
->old_cp
, act
->new_cp
);
/* Commit changes in DELTA to IVS.  If FORWARD is false, the changes are
   reverted instead.  */

static void
iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
		    struct iv_ca_delta *delta, bool forward)
{
  struct cost_pair *from, *to;
  struct iv_ca_delta *act;

  if (!forward)
    delta = iv_ca_delta_reverse (delta);

  for (act = delta; act; act = act->next)
    {
      from = act->old_cp;
      to = act->new_cp;
      gcc_assert (iv_ca_cand_for_group (ivs, act->group) == from);
      iv_ca_set_cp (data, ivs, act->group, to);
    }

  if (!forward)
    iv_ca_delta_reverse (delta);
}

/* Returns true if CAND is used in IVS.  */

static bool
iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
{
  return ivs->n_cand_uses[cand->id] > 0;
}

/* Returns number of induction variable candidates in the set IVS.  */

static unsigned
iv_ca_n_cands (struct iv_ca *ivs)
{
  return ivs->n_cands;
}

/* Free the list of changes DELTA.  */

static void
iv_ca_delta_free (struct iv_ca_delta **delta)
{
  struct iv_ca_delta *act, *next;

  for (act = *delta; act; act = next)
    {
      next = act->next;
      free (act);
    }

  *delta = NULL;
}

/* Allocates a new iv candidates assignment.  */

static struct iv_ca *
iv_ca_new (struct ivopts_data *data)
{
  struct iv_ca *nw = XNEW (struct iv_ca);

  nw->upto = 0;
  nw->bad_groups = 0;
  nw->cand_for_group = XCNEWVEC (struct cost_pair *,
				 data->vgroups.length ());
  nw->n_cand_uses = XCNEWVEC (unsigned, data->vcands.length ());
  nw->cands = BITMAP_ALLOC (NULL);
  nw->n_cands = 0;
  nw->n_invs = 0;
  nw->cand_use_cost = no_cost;
  nw->cand_cost = 0;
  nw->n_inv_var_uses = XCNEWVEC (unsigned, data->max_inv_var_id + 1);
  nw->n_inv_expr_uses = XCNEWVEC (unsigned, data->max_inv_expr_id + 1);
  nw->cost = no_cost;

  return nw;
}

/* Free memory occupied by the set IVS.  */

static void
iv_ca_free (struct iv_ca **ivs)
{
  free ((*ivs)->cand_for_group);
  free ((*ivs)->n_cand_uses);
  BITMAP_FREE ((*ivs)->cands);
  free ((*ivs)->n_inv_var_uses);
  free ((*ivs)->n_inv_expr_uses);
  free (*ivs);
  *ivs = NULL;
}

/* Dumps IVS to FILE.  */

static void
iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
{
  unsigned i;
  comp_cost cost = iv_ca_cost (ivs);

  fprintf (file, "  cost: %d (complexity %d)\n", cost.cost,
	   cost.complexity);
  fprintf (file, "  cand_cost: %d\n  cand_group_cost: %d (complexity %d)\n",
	   ivs->cand_cost, ivs->cand_use_cost.cost,
	   ivs->cand_use_cost.complexity);
  bitmap_print (file, ivs->cands, "  candidates: ","\n");

  for (i = 0; i < ivs->upto; i++)
    {
      struct iv_group *group = data->vgroups[i];
      struct cost_pair *cp = iv_ca_cand_for_group (ivs, group);
      if (cp)
	fprintf (file, "   group:%d --> iv_cand:%d, cost=(%d,%d)\n",
		 group->id, cp->cand->id, cp->cost.cost,
		 cp->cost.complexity);
      else
	fprintf (file, "   group:%d --> ??\n", group->id);
    }

  const char *pref = "";
  fprintf (file, "  invariant variables: ");
  for (i = 1; i <= data->max_inv_var_id; i++)
    if (ivs->n_inv_var_uses[i])
      {
	fprintf (file, "%s%d", pref, i);
	pref = ", ";
      }
  pref = "";
  fprintf (file, "\n  invariant expressions: ");
  for (i = 1; i <= data->max_inv_expr_id; i++)
    if (ivs->n_inv_expr_uses[i])
      {
	fprintf (file, "%s%d", pref, i);
	pref = ", ";
      }
  fprintf (file, "\n\n");
}

/* Try changing the candidate in IVS to CAND for each use.  Return the cost
   of the new set, and store the differences in DELTA.  The number of
   induction variables in the new set is stored to N_IVS.  MIN_NCAND is a
   flag; when it is true the function will try to find a solution with a
   minimal number of iv candidates.  */

static comp_cost
iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_cand *cand, struct iv_ca_delta **delta,
	      unsigned *n_ivs, bool min_ncand)
{
  unsigned i;
  comp_cost cost;
  struct iv_group *group;
  struct cost_pair *old_cp, *new_cp;

  *delta = NULL;
  for (i = 0; i < ivs->upto; i++)
    {
      group = data->vgroups[i];
      old_cp = iv_ca_cand_for_group (ivs, group);

      if (old_cp
	  && old_cp->cand == cand)
	continue;

      new_cp = get_group_iv_cost (data, group, cand);
      if (!new_cp)
	continue;

      if (!min_ncand)
	{
	  int cmp_invs = iv_ca_compare_deps (data, ivs, group, old_cp, new_cp);
	  /* Skip if new_cp depends on more invariants.  */
	  if (cmp_invs > 0)
	    continue;

	  int cmp_cost = compare_cost_pair (new_cp, old_cp);
	  /* Skip if new_cp is not cheaper.  */
	  if (cmp_cost > 0 || (cmp_cost == 0 && cmp_invs == 0))
	    continue;
	}

      *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  if (n_ivs)
    *n_ivs = iv_ca_n_cands (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}

/* Try narrowing set IVS by removing CAND.  Return the cost of
   the new set and store the differences in DELTA.  START is
   the candidate with which we start narrowing.  */

static comp_cost
iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
	      struct iv_cand *cand, struct iv_cand *start,
	      struct iv_ca_delta **delta)
{
  unsigned i, ci;
  struct iv_group *group;
  struct cost_pair *old_cp, *new_cp, *cp;
  bitmap_iterator bi;
  struct iv_cand *cnd;
  comp_cost cost, best_cost, acost;

  *delta = NULL;
  for (i = 0; i < data->vgroups.length (); i++)
    {
      group = data->vgroups[i];

      old_cp = iv_ca_cand_for_group (ivs, group);
      if (old_cp->cand != cand)
	continue;

      best_cost = iv_ca_cost (ivs);
      /* Start narrowing with START.  */
      new_cp = get_group_iv_cost (data, group, start);

      if (data->consider_all_candidates)
	{
	  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id || (start && ci == start->id))
		continue;

	      cnd = data->vcands[ci];

	      cp = get_group_iv_cost (data, group, cnd);
	      if (!cp)
		continue;

	      iv_ca_set_cp (data, ivs, group, cp);
	      acost = iv_ca_cost (ivs);

	      if (acost < best_cost)
		{
		  best_cost = acost;
		  new_cp = cp;
		}
	    }
	}
      else
	{
	  EXECUTE_IF_AND_IN_BITMAP (group->related_cands, ivs->cands, 0, ci, bi)
	    {
	      if (ci == cand->id || (start && ci == start->id))
		continue;

	      cnd = data->vcands[ci];

	      cp = get_group_iv_cost (data, group, cnd);
	      if (!cp)
		continue;

	      iv_ca_set_cp (data, ivs, group, cp);
	      acost = iv_ca_cost (ivs);

	      if (acost < best_cost)
		{
		  best_cost = acost;
		  new_cp = cp;
		}
	    }
	}
      /* Restore to the old cp for this use.  */
      iv_ca_set_cp (data, ivs, group, old_cp);

      if (!new_cp)
	{
	  iv_ca_delta_free (delta);
	  return infinite_cost;
	}

      *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
    }

  iv_ca_delta_commit (data, ivs, *delta, true);
  cost = iv_ca_cost (ivs);
  iv_ca_delta_commit (data, ivs, *delta, false);

  return cost;
}

/* Try optimizing the set of candidates IVS by removing candidates other
   than EXCEPT_CAND from it.  Return the cost of the new set, and store
   the differences in DELTA.  */

static comp_cost
iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
	     struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
  bitmap_iterator bi;
  struct iv_ca_delta *act_delta, *best_delta;
  unsigned i;
  comp_cost best_cost, acost;
  struct iv_cand *cand;

  best_delta = NULL;
  best_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      cand = data->vcands[i];

      if (cand == except_cand)
	continue;

      acost = iv_ca_narrow (data, ivs, cand, except_cand, &act_delta);

      if (acost < best_cost)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      *delta = NULL;
      return best_cost;
    }

  /* Recurse to possibly remove other unnecessary ivs.  */
  iv_ca_delta_commit (data, ivs, best_delta, true);
  best_cost = iv_ca_prune (data, ivs, except_cand, delta);
  iv_ca_delta_commit (data, ivs, best_delta, false);
  *delta = iv_ca_delta_join (best_delta, *delta);
  return best_cost;
}

/* Check if CAND_IDX is a candidate other than OLD_CAND that has a
   cheaper local cost for GROUP than BEST_CP.  Return a pointer to
   the corresponding cost_pair, otherwise just return BEST_CP.  */

static struct cost_pair*
cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
			unsigned int cand_idx, struct iv_cand *old_cand,
			struct cost_pair *best_cp)
{
  struct iv_cand *cand;
  struct cost_pair *cp;

  gcc_assert (old_cand != NULL && best_cp != NULL);
  if (cand_idx == old_cand->id)
    return best_cp;

  cand = data->vcands[cand_idx];
  cp = get_group_iv_cost (data, group, cand);
  if (cp != NULL && cheaper_cost_pair (cp, best_cp))
    return cp;

  return best_cp;
}

/* Try breaking the local optimal fixed-point for IVS by replacing candidates
   which are used by more than one iv use.  For each of those candidates,
   this function tries to represent the iv uses under that candidate using
   other ones with lower local cost, then tries to prune the new set.
   If the new set has lower cost, it returns the new cost after recording
   the candidate replacement in the list DELTA.  */

static comp_cost
iv_ca_replace (struct ivopts_data *data, struct iv_ca *ivs,
	       struct iv_ca_delta **delta)
{
  bitmap_iterator bi, bj;
  unsigned int i, j, k;
  struct iv_cand *cand;
  comp_cost orig_cost, acost;
  struct iv_ca_delta *act_delta, *tmp_delta;
  struct cost_pair *old_cp, *best_cp = NULL;

  *delta = NULL;
  orig_cost = iv_ca_cost (ivs);

  EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
    {
      if (ivs->n_cand_uses[i] == 1
	  || ivs->n_cand_uses[i] > ALWAYS_PRUNE_CAND_SET_BOUND)
	continue;

      cand = data->vcands[i];

      act_delta = NULL;
      /* Represent uses under the current candidate using other ones with
	 lower local cost.  */
      for (j = 0; j < ivs->upto; j++)
	{
	  struct iv_group *group = data->vgroups[j];
	  old_cp = iv_ca_cand_for_group (ivs, group);

	  if (old_cp->cand != cand)
	    continue;

	  best_cp = old_cp;
	  if (data->consider_all_candidates)
	    for (k = 0; k < data->vcands.length (); k++)
	      best_cp = cheaper_cost_with_cand (data, group, k,
						old_cp->cand, best_cp);
	  else
	    EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, k, bj)
	      best_cp = cheaper_cost_with_cand (data, group, k,
						old_cp->cand, best_cp);

	  if (best_cp == old_cp)
	    continue;

	  act_delta = iv_ca_delta_add (group, old_cp, best_cp, act_delta);
	}
      /* No need for further pruning.  */
      if (!act_delta)
	continue;

      /* Prune the new candidate set.  */
      iv_ca_delta_commit (data, ivs, act_delta, true);
      acost = iv_ca_prune (data, ivs, NULL, &tmp_delta);
      iv_ca_delta_commit (data, ivs, act_delta, false);
      act_delta = iv_ca_delta_join (act_delta, tmp_delta);

      if (acost < orig_cost)
	{
	  *delta = act_delta;
	  return acost;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  return orig_cost;
}

/* Tries to extend the set IVS in the best possible way in order to
   express the GROUP.  If ORIGINALP is true, prefer candidates from
   the original set of IVs, otherwise favor important candidates not
   based on any memory object.  */

static bool
try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
		  struct iv_group *group, bool originalp)
{
  comp_cost best_cost, act_cost;
  unsigned i;
  bitmap_iterator bi;
  struct iv_cand *cand;
  struct iv_ca_delta *best_delta = NULL, *act_delta;
  struct cost_pair *cp;

  iv_ca_add_group (data, ivs, group);
  best_cost = iv_ca_cost (ivs);
  cp = iv_ca_cand_for_group (ivs, group);
  if (cp)
    {
      best_delta = iv_ca_delta_add (group, NULL, cp, NULL);
      iv_ca_set_no_cp (data, ivs, group);
    }

  /* If ORIGINALP is true, try to find the original IV for the use.
     Otherwise first try important candidates not based on any memory
     object.  Only if this fails, try the specific ones.  Rationale -- in
     loops with many variables the best choice often is to use just one
     generic biv.  If we added here many ivs specific to the uses, the
     optimization algorithm later would be likely to get stuck in a local
     minimum, thus causing us to create too many ivs.  The approach from
     few ivs to more seems more likely to be successful -- starting from
     few ivs, replacing an expensive use by a specific iv should always
     be a win.  */
  EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, i, bi)
    {
      cand = data->vcands[i];

      if (originalp && cand->pos != IP_ORIGINAL)
	continue;

      if (!originalp && cand->iv->base_object != NULL_TREE)
	continue;

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      cp = get_group_iv_cost (data, group, cand);
      if (!cp)
	continue;

      iv_ca_set_cp (data, ivs, group, cp);
      act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
			       true);
      iv_ca_set_no_cp (data, ivs, group);
      act_delta = iv_ca_delta_add (group, NULL, cp, act_delta);

      if (act_cost < best_cost)
	{
	  best_cost = act_cost;

	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (best_cost.infinite_cost_p ())
    {
      for (i = 0; i < group->n_map_members; i++)
	{
	  cp = group->cost_map + i;
	  cand = cp->cand;
	  if (!cand)
	    continue;

	  /* Already tried this.  */
	  if (cand->important)
	    {
	      if (originalp && cand->pos == IP_ORIGINAL)
		continue;
	      if (!originalp && cand->iv->base_object == NULL_TREE)
		continue;
	    }

	  if (iv_ca_cand_used_p (ivs, cand))
	    continue;

	  act_delta = NULL;
	  iv_ca_set_cp (data, ivs, group, cp);
	  act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
	  iv_ca_set_no_cp (data, ivs, group);
	  act_delta = iv_ca_delta_add (group,
				       iv_ca_cand_for_group (ivs, group),
				       cp, act_delta);

	  if (act_cost < best_cost)
	    {
	      best_cost = act_cost;

	      if (best_delta)
		iv_ca_delta_free (&best_delta);
	      best_delta = act_delta;
	    }
	  else
	    iv_ca_delta_free (&act_delta);
	}
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  iv_ca_delta_free (&best_delta);

  return !best_cost.infinite_cost_p ();
}

/* Finds an initial assignment of candidates to uses.  */

static struct iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
  unsigned i;
  struct iv_ca *ivs = iv_ca_new (data);

  for (i = 0; i < data->vgroups.length (); i++)
    if (!try_add_cand_for (data, ivs, data->vgroups[i], originalp))
      {
	iv_ca_free (&ivs);
	return NULL;
      }

  return ivs;
}

/* Tries to improve the set of induction variables IVS.  TRY_REPLACE_P
   points to a bool variable; if it is true, this function tries to break
   the local optimal fixed-point by replacing candidates in IVS.  */

static bool
try_improve_iv_set (struct ivopts_data *data,
		    struct iv_ca *ivs, bool *try_replace_p)
{
  unsigned i, n_ivs;
  comp_cost acost, best_cost = iv_ca_cost (ivs);
  struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
  struct iv_cand *cand;

  /* Try extending the set of induction variables by one.  */
  for (i = 0; i < data->vcands.length (); i++)
    {
      cand = data->vcands[i];

      if (iv_ca_cand_used_p (ivs, cand))
	continue;

      acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
      if (!act_delta)
	continue;

      /* If we successfully added the candidate and the set is small enough,
	 try optimizing it by removing other candidates.  */
      if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
	{
	  iv_ca_delta_commit (data, ivs, act_delta, true);
	  acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
	  iv_ca_delta_commit (data, ivs, act_delta, false);
	  act_delta = iv_ca_delta_join (act_delta, tmp_delta);
	}

      if (acost < best_cost)
	{
	  best_cost = acost;
	  iv_ca_delta_free (&best_delta);
	  best_delta = act_delta;
	}
      else
	iv_ca_delta_free (&act_delta);
    }

  if (!best_delta)
    {
      /* Try removing the candidates from the set instead.  */
      best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);

      if (!best_delta && *try_replace_p)
	{
	  *try_replace_p = false;
	  /* So far the candidate selecting algorithm tends to choose fewer
	     IVs so that it can handle cases in which loops have many
	     variables but the best choice is often to use only one general
	     biv.  One weakness is that it can't handle the opposite cases,
	     in which different candidates should be chosen with respect to
	     each use.  To solve the problem, we replace candidates in a
	     manner described by the comments of iv_ca_replace, thus giving
	     the general algorithm a chance to break the local optimal
	     fixed-point in these cases.  */
	  best_cost = iv_ca_replace (data, ivs, &best_delta);
	}

      if (!best_delta)
	return false;
    }

  iv_ca_delta_commit (data, ivs, best_delta, true);
  gcc_assert (best_cost == iv_ca_cost (ivs));
  iv_ca_delta_free (&best_delta);
  return true;
}

/* Attempts to find the optimal set of induction variables.  We do a simple
   greedy heuristic -- we try to replace at most one candidate in the
   selected solution and remove the unused ivs while this improves the
   cost.  */

static struct iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
  struct iv_ca *set;
  bool try_replace_p = true;

  /* Get the initial solution.  */
  set = get_initial_solution (data, originalp);
  if (!set)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
      return NULL;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Initial set of candidates:\n");
      iv_ca_dump (data, dump_file, set);
    }

  while (try_improve_iv_set (data, set, &try_replace_p))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Improved to:\n");
	  iv_ca_dump (data, dump_file, set);
	}
    }

  return set;
}

static struct iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
  unsigned i;
  comp_cost cost, origcost;
  struct iv_ca *set, *origset;

  /* Determine the cost based on a strategy that starts with original IVs,
     and try again using a strategy that prefers candidates not based
     on any IVs.  */
  origset = find_optimal_iv_set_1 (data, true);
  set = find_optimal_iv_set_1 (data, false);

  if (!origset && !set)
    return NULL;

  origcost = origset ? iv_ca_cost (origset) : infinite_cost;
  cost = set ? iv_ca_cost (set) : infinite_cost;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Original cost %d (complexity %d)\n\n",
	       origcost.cost, origcost.complexity);
      fprintf (dump_file, "Final cost %d (complexity %d)\n\n",
	       cost.cost, cost.complexity);
    }

  /* Choose the one with the best cost.  */
  if (origcost <= cost)
    {
      if (set)
	iv_ca_free (&set);
      set = origset;
    }
  else if (origset)
    iv_ca_free (&origset);

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];
      group->selected = iv_ca_cand_for_group (set, group)->cand;
    }

  return set;
}

/* Creates a new induction variable corresponding to CAND.  */

static void
create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
{
  gimple_stmt_iterator incr_pos;
  tree base;
  struct iv_use *use;
  struct iv_group *group;
  bool after = false;

  gcc_assert (cand->iv != NULL);

  switch (cand->pos)
    {
    case IP_NORMAL:
      incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
      break;

    case IP_END:
      incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
      after = true;
      break;

    case IP_AFTER_USE:
      after = true;
      /* fall through */
    case IP_BEFORE_USE:
      incr_pos = gsi_for_stmt (cand->incremented_at);
      break;

    case IP_ORIGINAL:
      /* Mark that the iv is preserved.  */
      name_info (data, cand->var_before)->preserve_biv = true;
      name_info (data, cand->var_after)->preserve_biv = true;

      /* Rewrite the increment so that it uses var_before directly.  */
      use = find_interesting_uses_op (data, cand->var_after);
      group = data->vgroups[use->group_id];
      group->selected = cand;
      return;
    }

  gimple_add_tmp_var (cand->var_before);

  base = unshare_expr (cand->iv->base);

  create_iv (base, unshare_expr (cand->iv->step),
             cand->var_before, data->current_loop,
             &incr_pos, after, &cand->var_before, &cand->var_after);
}
/* Creates new induction variables described in SET.  */

static void
create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
{
  unsigned i;
  struct iv_cand *cand;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
    {
      cand = data->vcands[i];
      create_new_iv (data, cand);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Selected IV set for loop %d",
               data->current_loop->num);
      if (data->loop_loc != UNKNOWN_LOCATION)
        fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
                 LOCATION_LINE (data->loop_loc));
      fprintf (dump_file, ", " HOST_WIDE_INT_PRINT_DEC " avg niters",
               avg_loop_niter (data->current_loop));
      fprintf (dump_file, ", %lu IVs:\n", bitmap_count_bits (set->cands));
      EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
        {
          cand = data->vcands[i];
          dump_cand (dump_file, cand);
        }
      fprintf (dump_file, "\n");
    }
}
/* Rewrites USE (definition of iv used in a nonlinear expression)
   using candidate CAND.  */

static void
rewrite_use_nonlinear_expr (struct ivopts_data *data,
                            struct iv_use *use, struct iv_cand *cand)
{
  gassign *ass;
  gimple_stmt_iterator bsi;
  tree comp, type = get_use_type (use), tgt;
  /* An important special case -- if we are asked to express value of
     the original iv by itself, just exit; there is no need to
     introduce a new computation (that might also need casting the
     variable to unsigned and back).  */
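  /* For instance (an illustrative sketch, hypothetical SSA names): if CAND
     is the original biv and USE is its own increment i_4 = i_1 + 1, the
     statement already computes the candidate's value, so rewriting it in
     terms of itself would only add a useless copy and casts.  */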
  if (cand->pos == IP_ORIGINAL
      && cand->incremented_at == use->stmt)
    {
      tree op = NULL_TREE;
      enum tree_code stmt_code;

      gcc_assert (is_gimple_assign (use->stmt));
      gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);

      /* Check whether we may leave the computation unchanged.
         This is the case only if it does not rely on other
         computations in the loop -- otherwise, the computation
         we rely upon may be removed in remove_unused_ivs,
         thus leading to ICE.  */
      stmt_code = gimple_assign_rhs_code (use->stmt);
      if (stmt_code == PLUS_EXPR
          || stmt_code == MINUS_EXPR
          || stmt_code == POINTER_PLUS_EXPR)
        {
          if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
            op = gimple_assign_rhs2 (use->stmt);
          else if (gimple_assign_rhs2 (use->stmt) == cand->var_before)
            op = gimple_assign_rhs1 (use->stmt);
        }

      if (op != NULL_TREE)
        {
          if (expr_invariant_in_loop_p (data->current_loop, op))
            return;
          if (TREE_CODE (op) == SSA_NAME)
            {
              struct iv *iv = get_iv (data, op);
              if (iv != NULL && integer_zerop (iv->step))
                return;
            }
        }
    }
  switch (gimple_code (use->stmt))
    {
    case GIMPLE_PHI:
      tgt = PHI_RESULT (use->stmt);

      /* If we should keep the biv, do not replace it.  */
      if (name_info (data, tgt)->preserve_biv)
        return;

      bsi = gsi_after_labels (gimple_bb (use->stmt));
      break;

    case GIMPLE_ASSIGN:
      tgt = gimple_assign_lhs (use->stmt);
      bsi = gsi_for_stmt (use->stmt);
      break;

    default:
      gcc_unreachable ();
    }
  aff_tree aff_inv, aff_var;
  if (!get_computation_aff_1 (data->current_loop, use->stmt,
                              use, cand, &aff_inv, &aff_var))
    gcc_unreachable ();

  unshare_aff_combination (&aff_inv);
  unshare_aff_combination (&aff_var);
  /* Prefer the CSE opportunity over the loop invariant: by adding the
     constant offset last, iv_uses that differ only in their offsets can
     be CSEed.  */
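  /* Illustrative example (hypothetical names): rewriting the uses a[i] and
     a[i + 3] against the same candidate leaves both with the common
     subexpression &a + i_1 * 4; the constant offsets 0 and 12 are applied
     last, so the shared part can be CSEed by later passes.  */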
  widest_int offset = aff_inv.offset;
  aff_inv.offset = 0;
  gimple_seq stmt_list = NULL, seq = NULL;
  tree comp_op1 = aff_combination_to_tree (&aff_inv);
  tree comp_op2 = aff_combination_to_tree (&aff_var);
  gcc_assert (comp_op1 && comp_op2);

  comp_op1 = force_gimple_operand (comp_op1, &seq, true, NULL);
  gimple_seq_add_seq (&stmt_list, seq);
  comp_op2 = force_gimple_operand (comp_op2, &seq, true, NULL);
  gimple_seq_add_seq (&stmt_list, seq);

  if (POINTER_TYPE_P (TREE_TYPE (comp_op2)))
    std::swap (comp_op1, comp_op2);

  if (POINTER_TYPE_P (TREE_TYPE (comp_op1)))
    {
      comp = fold_build_pointer_plus (comp_op1,
                                      fold_convert (sizetype, comp_op2));
      comp = fold_build_pointer_plus (comp,
                                      wide_int_to_tree (sizetype, offset));
    }
  else
    {
      comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp_op1,
                          fold_convert (TREE_TYPE (comp_op1), comp_op2));
      comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp,
                          wide_int_to_tree (TREE_TYPE (comp_op1), offset));
    }

  comp = fold_convert (type, comp);
  if (!valid_gimple_rhs_p (comp)
      || (gimple_code (use->stmt) != GIMPLE_PHI
          /* We can't allow re-allocating the stmt as it might be pointed
             to still.  */
          && (get_gimple_rhs_num_ops (TREE_CODE (comp))
              >= gimple_num_ops (gsi_stmt (bsi)))))
    {
      comp = force_gimple_operand (comp, &seq, true, NULL);
      gimple_seq_add_seq (&stmt_list, seq);
      if (POINTER_TYPE_P (TREE_TYPE (tgt)))
        {
          duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
          /* As this isn't a plain copy we have to reset alignment
             information.  */
          if (SSA_NAME_PTR_INFO (comp))
            mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
        }
    }

  gsi_insert_seq_before (&bsi, stmt_list, GSI_SAME_STMT);
  if (gimple_code (use->stmt) == GIMPLE_PHI)
    {
      ass = gimple_build_assign (tgt, comp);
      gsi_insert_before (&bsi, ass, GSI_SAME_STMT);

      bsi = gsi_for_stmt (use->stmt);
      remove_phi_node (&bsi, false);
    }
  else
    {
      gimple_assign_set_rhs_from_tree (&bsi, comp);
      use->stmt = gsi_stmt (bsi);
    }
}
/* Performs a peephole optimization to reorder the iv update statement with
   a mem ref to enable instruction combining in later phases.  The mem ref
   uses the iv value before the update, so the reordering transformation
   requires adjustment of the offset.  CAND is the selected IV_CAND.

   Example:

   t = MEM_REF (base, iv1, 8, 16);  // base, index, stride, offset
   iv2 = iv1 + 1;

   if (t < val)      (1)
     goto L;
   goto Head;

   Directly propagating t over to (1) would introduce an overlapping live
   range and thus increase register pressure.  This peephole transforms it
   into:

   iv2 = iv1 + 1;
   t = MEM_REF (base, iv2, 8, 8);
   if (t < val)
     goto L;
   goto Head;  */
static void
adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
{
  tree var_after;
  gimple *iv_update, *stmt;
  basic_block bb;
  gimple_stmt_iterator gsi, gsi_iv;

  if (cand->pos != IP_NORMAL)
    return;

  var_after = cand->var_after;
  iv_update = SSA_NAME_DEF_STMT (var_after);

  bb = gimple_bb (iv_update);
  gsi = gsi_last_nondebug_bb (bb);
  stmt = gsi_stmt (gsi);

  /* Only handle conditional statement for now.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return;

  gsi_prev_nondebug (&gsi);
  stmt = gsi_stmt (gsi);
  if (stmt != iv_update)
    return;

  gsi_prev_nondebug (&gsi);
  if (gsi_end_p (gsi))
    return;

  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  if (stmt != use->stmt)
    return;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reordering \n");
      print_gimple_stmt (dump_file, iv_update, 0);
      print_gimple_stmt (dump_file, use->stmt, 0);
      fprintf (dump_file, "\n");
    }

  gsi = gsi_for_stmt (use->stmt);
  gsi_iv = gsi_for_stmt (iv_update);
  gsi_move_before (&gsi_iv, &gsi);

  cand->pos = IP_BEFORE_USE;
  cand->incremented_at = use->stmt;
}
/* Rewrites USE (address that is an iv) using candidate CAND.  */
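/* For instance (an illustrative sketch, hypothetical SSA names): an array
   access a[i_1] whose group selected a pointer candidate is re-expressed
   through the candidate's value at the use point, and create_mem_ref below
   then builds a memory reference such as MEM[base_5 + i_7 * 4] from the
   affine form.  */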
static void
rewrite_use_address (struct ivopts_data *data,
                     struct iv_use *use, struct iv_cand *cand)
{
  aff_tree aff;
  bool ok;

  adjust_iv_update_pos (cand, use);
  ok = get_computation_aff (data->current_loop, use->stmt, use, cand, &aff);
  gcc_assert (ok);
  unshare_aff_combination (&aff);
  /* To avoid undefined overflow problems, all IV candidates use unsigned
     integer types.  The drawback is that this makes it impossible for
     create_mem_ref to distinguish an IV that is based on a memory object
     from one that represents simply an offset.

     To work around this problem, we pass a hint to create_mem_ref that
     indicates which variable (if any) in aff is an IV based on a memory
     object.  Note that we only consider the candidate.  If this is not
     based on an object, the base of the reference is in some subexpression
     of the use -- but these will use pointer types, so they are recognized
     by the create_mem_ref heuristics anyway.  */
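  /* A sketch of the hint (illustrative, hypothetical names): for a
     candidate based on the object 'a', AFF may contain the combination
     (unsigned long) &a + 4 * i_1; passing the candidate's variable as the
     BASE_HINT argument lets create_mem_ref rebuild the reference around &a
     instead of treating the whole sum as a plain offset.  */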
  tree iv = var_at_stmt (data->current_loop, cand, use->stmt);
  tree base_hint = (cand->iv->base_object) ? iv : NULL_TREE;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  tree type = TREE_TYPE (*use->op_p);
  unsigned int align = get_object_alignment (*use->op_p);
  if (align != TYPE_ALIGN (type))
    type = build_aligned_type (type, align);

  tree ref = create_mem_ref (&bsi, type, &aff,
                             reference_alias_ptr_type (*use->op_p),
                             iv, base_hint, data->speed);

  copy_ref_info (ref, *use->op_p);
  *use->op_p = ref;
}
/* Rewrites USE (the condition such that one of the arguments is an iv) using
   candidate CAND.  */
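/* For instance (an illustrative sketch, hypothetical names): the exit test
   i_1 < n_2 may be replaced by a test such as p_5 != bound_7 against a
   bound computed in the loop preheader, which allows the original counter
   to be eliminated entirely.  */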
static void
rewrite_use_compare (struct ivopts_data *data,
                     struct iv_use *use, struct iv_cand *cand)
{
  tree comp, op, bound;
  gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
  enum tree_code compare;
  struct iv_group *group = data->vgroups[use->group_id];
  struct cost_pair *cp = get_group_iv_cost (data, group, cand);
  bound = cp->value;
  if (bound)
    {
      tree var = var_at_stmt (data->current_loop, cand, use->stmt);
      tree var_type = TREE_TYPE (var);
      gimple_seq stmts;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Replacing exit test: ");
          print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
        }
      compare = cp->comp;
      bound = unshare_expr (fold_convert (var_type, bound));
      op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (
                loop_preheader_edge (data->current_loop),
                stmts);

      gcond *cond_stmt = as_a <gcond *> (use->stmt);
      gimple_cond_set_lhs (cond_stmt, var);
      gimple_cond_set_code (cond_stmt, compare);
      gimple_cond_set_rhs (cond_stmt, op);
      return;
    }
  /* The induction variable elimination failed; just express the original
     giv.  */
  comp = get_computation_at (data->current_loop, use->stmt, use, cand);
  gcc_assert (comp != NULL_TREE);
  gcc_assert (use->op_p != NULL);
  *use->op_p = force_gimple_operand_gsi (&bsi, comp, true,
                                         SSA_NAME_VAR (*use->op_p),
                                         true, GSI_SAME_STMT);
}
/* Rewrite the groups using the selected induction variables.  */

static void
rewrite_groups (struct ivopts_data *data)
{
  unsigned i, j;

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];
      struct iv_cand *cand = group->selected;

      gcc_assert (cand);

      if (group->type == USE_NONLINEAR_EXPR)
        {
          for (j = 0; j < group->vuses.length (); j++)
            {
              rewrite_use_nonlinear_expr (data, group->vuses[j], cand);
              update_stmt (group->vuses[j]->stmt);
            }
        }
      else if (group->type == USE_ADDRESS)
        {
          for (j = 0; j < group->vuses.length (); j++)
            {
              rewrite_use_address (data, group->vuses[j], cand);
              update_stmt (group->vuses[j]->stmt);
            }
        }
      else
        {
          gcc_assert (group->type == USE_COMPARE);

          for (j = 0; j < group->vuses.length (); j++)
            {
              rewrite_use_compare (data, group->vuses[j], cand);
              update_stmt (group->vuses[j]->stmt);
            }
        }
    }
}
/* Removes the ivs that are not used after rewriting.  */

static void
remove_unused_ivs (struct ivopts_data *data)
{
  unsigned j;
  bitmap_iterator bi;
  bitmap toremove = BITMAP_ALLOC (NULL);

  /* Figure out an order in which to release SSA DEFs so that we don't
     release something that we'd have to propagate into a debug stmt
     afterwards.  */
  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
    {
      struct version_info *info;

      info = ver_info (data, j);
      if (info->iv
          && !integer_zerop (info->iv->step)
          && !info->inv_id
          && !info->iv->nonlin_use
          && !info->preserve_biv)
        {
          bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));

          tree def = info->iv->ssa_name;

          if (MAY_HAVE_DEBUG_STMTS && SSA_NAME_DEF_STMT (def))
            {
              imm_use_iterator imm_iter;
              use_operand_p use_p;
              gimple *stmt;
              int count = 0;

              FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
                {
                  if (!gimple_debug_bind_p (stmt))
                    continue;
                  /* We just want to determine whether to do nothing
                     (count == 0), to substitute the computed
                     expression into a single use of the SSA DEF by
                     itself (count == 1), or to use a debug temp
                     because the SSA DEF is used multiple times or as
                     part of a larger expression (count > 1).  */
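                  /* Illustrative example (hypothetical SSA names): a bind
                     like "# DEBUG i => i_1" uses the DEF by itself and
                     bumps COUNT once, while "# DEBUG x => i_1 * 4" embeds
                     it in a larger expression and bumps COUNT twice,
                     forcing the debug-temp path below.  */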
                  count++;
                  if (gimple_debug_bind_get_value (stmt) != def)
                    count++;

                  if (count > 1)
                    BREAK_FROM_IMM_USE_STMT (imm_iter);
                }

              if (!count)
                continue;

              struct iv_use dummy_use;
              struct iv_cand *best_cand = NULL, *cand;
              unsigned i, best_pref = 0, cand_pref;

              memset (&dummy_use, 0, sizeof (dummy_use));
              dummy_use.iv = info->iv;
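              /* The ranking below prefers, in this order: a candidate whose
                 step matches the removed iv's step (weight 4), one whose
                 base has the same machine mode (weight 2), and one whose
                 base is a constant (weight 1).  */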
              for (i = 0; i < data->vgroups.length () && i < 64; i++)
                {
                  cand = data->vgroups[i]->selected;
                  if (cand == best_cand)
                    continue;
                  cand_pref = operand_equal_p (cand->iv->step,
                                               info->iv->step, 0)
                    ? 4 : 0;
                  cand_pref
                    += TYPE_MODE (TREE_TYPE (cand->iv->base))
                    == TYPE_MODE (TREE_TYPE (info->iv->base))
                    ? 2 : 0;
                  cand_pref
                    += TREE_CODE (cand->iv->base) == INTEGER_CST
                    ? 1 : 0;
                  if (best_cand == NULL || best_pref < cand_pref)
                    {
                      best_cand = cand;
                      best_pref = cand_pref;
                    }
                }

              if (!best_cand)
                continue;

              tree comp = get_computation_at (data->current_loop,
                                              SSA_NAME_DEF_STMT (def),
                                              &dummy_use, best_cand);
              if (!comp)
                continue;

              if (count > 1)
                {
                  tree vexpr = make_node (DEBUG_EXPR_DECL);
                  DECL_ARTIFICIAL (vexpr) = 1;
                  TREE_TYPE (vexpr) = TREE_TYPE (comp);
                  if (SSA_NAME_VAR (def))
                    SET_DECL_MODE (vexpr, DECL_MODE (SSA_NAME_VAR (def)));
                  else
                    SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (vexpr)));
                  gdebug *def_temp
                    = gimple_build_debug_bind (vexpr, comp, NULL);
                  gimple_stmt_iterator gsi;

                  if (gimple_code (SSA_NAME_DEF_STMT (def)) == GIMPLE_PHI)
                    gsi = gsi_after_labels (gimple_bb
                                            (SSA_NAME_DEF_STMT (def)));
                  else
                    gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (def));

                  gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
                  comp = vexpr;
                }

              FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
                {
                  if (!gimple_debug_bind_p (stmt))
                    continue;

                  FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
                    SET_USE (use_p, comp);

                  update_stmt (stmt);
                }
            }
        }
    }

  release_defs_bitset (toremove);

  BITMAP_FREE (toremove);
}
/* Frees memory occupied by struct tree_niter_desc in *VALUE.  Callback
   for hash_map::traverse.  */

bool
free_tree_niter_desc (edge const &, tree_niter_desc *const &value, void *)
{
  free (value);
  return true;
}
/* Frees data allocated by the optimization of a single loop.  */

static void
free_loop_data (struct ivopts_data *data)
{
  unsigned i, j;
  bitmap_iterator bi;
  tree obj;

  if (data->niters)
    {
      data->niters->traverse<void *, free_tree_niter_desc> (NULL);
      delete data->niters;
      data->niters = NULL;
    }

  EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
    {
      struct version_info *info;

      info = ver_info (data, i);
      info->iv = NULL;
      info->has_nonlin_use = false;
      info->preserve_biv = false;
      info->inv_id = 0;
    }
  bitmap_clear (data->relevant);
  bitmap_clear (data->important_candidates);

  for (i = 0; i < data->vgroups.length (); i++)
    {
      struct iv_group *group = data->vgroups[i];

      for (j = 0; j < group->vuses.length (); j++)
        free (group->vuses[j]);
      group->vuses.release ();

      BITMAP_FREE (group->related_cands);
      for (j = 0; j < group->n_map_members; j++)
        {
          if (group->cost_map[j].inv_vars)
            BITMAP_FREE (group->cost_map[j].inv_vars);
          if (group->cost_map[j].inv_exprs)
            BITMAP_FREE (group->cost_map[j].inv_exprs);
        }

      free (group->cost_map);
      free (group);
    }
  data->vgroups.truncate (0);

  for (i = 0; i < data->vcands.length (); i++)
    {
      struct iv_cand *cand = data->vcands[i];

      if (cand->inv_vars)
        BITMAP_FREE (cand->inv_vars);
      if (cand->inv_exprs)
        BITMAP_FREE (cand->inv_exprs);
      free (cand);
    }
  data->vcands.truncate (0);

  if (data->version_info_size < num_ssa_names)
    {
      data->version_info_size = 2 * num_ssa_names;
      free (data->version_info);
      data->version_info = XCNEWVEC (struct version_info,
                                     data->version_info_size);
    }

  data->max_inv_var_id = 0;
  data->max_inv_expr_id = 0;

  FOR_EACH_VEC_ELT (decl_rtl_to_reset, i, obj)
    SET_DECL_RTL (obj, NULL_RTX);

  decl_rtl_to_reset.truncate (0);

  data->inv_expr_tab->empty ();

  data->iv_common_cand_tab->empty ();
  data->iv_common_cands.truncate (0);
}
/* Finalizes data structures used by the iv optimization pass.  */

static void
tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
{
  free_loop_data (data);
  free (data->version_info);
  BITMAP_FREE (data->relevant);
  BITMAP_FREE (data->important_candidates);

  decl_rtl_to_reset.release ();
  data->vgroups.release ();
  data->vcands.release ();
  delete data->inv_expr_tab;
  data->inv_expr_tab = NULL;
  free_affine_expand_cache (&data->name_expansion_cache);
  delete data->iv_common_cand_tab;
  data->iv_common_cand_tab = NULL;
  data->iv_common_cands.release ();
  obstack_free (&data->iv_obstack, NULL);
}
/* Returns true if the loop body BODY includes any function calls.  */

static bool
loop_body_includes_call (basic_block *body, unsigned num_nodes)
{
  gimple_stmt_iterator gsi;
  unsigned i;

  for (i = 0; i < num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple *stmt = gsi_stmt (gsi);
        if (is_gimple_call (stmt)
            && !gimple_call_internal_p (stmt)
            && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
          return true;
      }
  return false;
}
/* Optimizes the LOOP.  Returns true if anything changed.  */

static bool
tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
{
  bool changed = false;
  struct iv_ca *iv_ca;
  edge exit = single_dom_exit (loop);
  basic_block *body;

  gcc_assert (!data->niters);
  data->current_loop = loop;
  data->loop_loc = find_loop_location (loop);
  data->speed = optimize_loop_for_speed_p (loop);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Processing loop %d", loop->num);
      if (data->loop_loc != UNKNOWN_LOCATION)
        fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
                 LOCATION_LINE (data->loop_loc));
      fprintf (dump_file, "\n");

      if (exit)
        {
          fprintf (dump_file, "  single exit %d -> %d, exit condition ",
                   exit->src->index, exit->dest->index);
          print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
          fprintf (dump_file, "\n");
        }

      fprintf (dump_file, "\n");
    }

  body = get_loop_body (loop);
  data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
  renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
  free (body);

  data->loop_single_exit_p = exit != NULL && loop_only_exit_p (loop, exit);

  /* For each ssa name, determine whether it behaves as an induction
     variable in some loop.  */
  if (!find_induction_variables (data))
    goto finish;

  /* Finds interesting uses (item 1).  */
  find_interesting_uses (data);
  if (data->vgroups.length () > MAX_CONSIDERED_GROUPS)
    goto finish;

  /* Finds candidates for the induction variables (item 2).  */
  find_iv_candidates (data);

  /* Calculates the costs (item 3, part 1).  */
  determine_iv_costs (data);
  determine_group_iv_costs (data);
  determine_set_costs (data);

  /* Find the optimal set of induction variables (item 3, part 2).  */
  iv_ca = find_optimal_iv_set (data);
  if (!iv_ca)
    goto finish;
  changed = true;

  /* Create the new induction variables (item 4, part 1).  */
  create_new_ivs (data, iv_ca);
  iv_ca_free (&iv_ca);

  /* Rewrite the uses (item 4, part 2).  */
  rewrite_groups (data);

  /* Remove the ivs that are unused after rewriting.  */
  remove_unused_ivs (data);

  /* We have changed the structure of induction variables; it might happen
     that definitions in the scev database refer to some of them that were
     eliminated.  */
  scev_reset ();

finish:
  free_loop_data (data);

  return changed;
}
/* Main entry point.  Optimizes induction variables in loops.  */

void
tree_ssa_iv_optimize (void)
{
  struct loop *loop;
  struct ivopts_data data;

  tree_ssa_iv_optimize_init (&data);

  /* Optimize the loops starting with the innermost ones.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        flow_loop_dump (loop, dump_file, NULL, 1);

      tree_ssa_iv_optimize_loop (&data, loop);
    }

  tree_ssa_iv_optimize_finalize (&data);
}
#include "gt-tree-ssa-loop-ivopts.h"