/* Copyright (C) 2005-2018 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
22 #include "coretypes.h"
29 #include "tree-pass.h"
30 #include "gimple-ssa.h"
31 #include "optabs-query.h"
32 #include "tree-pretty-print.h"
33 #include "fold-const.h"
34 #include "stor-layout.h"
36 #include "gimple-iterator.h"
37 #include "gimplify-me.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-manip.h"
40 #include "tree-ssa-loop-niter.h"
41 #include "tree-ssa-loop.h"
43 #include "tree-into-ssa.h"
45 #include "tree-scalar-evolution.h"
47 #include "langhooks.h"
48 #include "tree-inline.h"
49 #include "tree-data-ref.h"
50 #include "diagnostic-core.h"
/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
	{
	  a[255] = ...;		(0)
	  a[i] = ...;		(1)
	  a[i + 64] = ...;	(2)
	  a[16*i] = ...;	(3)
	  a[187*i] = ...;	(4)
	  a[187*i + 50] = ...;	(5)
	}

      (0) obviously has PREFETCH_BEFORE 1
      (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
	  location 64 iterations before it, and PREFETCH_MOD 64 (since
	  it hits the same cache line otherwise).
      (2) has PREFETCH_MOD 64
      (3) has PREFETCH_MOD 4
      (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
	  the cache line accessed by (5) is the same with probability only
	  7/32.
      (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.
   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?
   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO might be:
   -- write and use more general reuse analysis (that could be also used
      in other cache aimed loop optimizations)
   -- make it behave sanely together with the prefetches given by user
      (now we just ignore them; at the very least we should avoid
      optimizing loops in which the user inserted their own prefetches)
   -- we assume cache line size alignment of arrays; this could be
      improved.  */
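
/* Illustrative note (not from the original sources): step 3) above ends up
   as the computation in loop_prefetch_arrays below,

     ahead = (PREFETCH_LATENCY + time - 1) / time;

   i.e. with a (hypothetical) prefetch latency of 200 cycles and a loop body
   estimated at 12 units of time, prefetches are issued for data used
   ceil (200 / 12) = 17 iterations ahead.  */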
/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif
#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif
/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif
/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;			/* Base of the reference.  */
  tree step;			/* Step of the reference.  */
  struct mem_ref *refs;		/* References in the group.  */
  struct mem_ref_group *next;	/* Next group of references.  */
  unsigned int uid;		/* Group UID, used only for debugging.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL HOST_WIDE_INT_M1U
/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the amount of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif
/* The memory reference.  */

struct mem_ref
{
  gimple *stmt;			/* Statement in which the reference appears.  */
  tree mem;			/* The reference.  */
  HOST_WIDE_INT delta;		/* Constant offset of the reference.  */
  struct mem_ref_group *group;	/* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
				/* Prefetch only each PREFETCH_MOD-th
				   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
				/* Prefetch only first PREFETCH_BEFORE
				   iterations.  */
  unsigned reuse_distance;	/* The amount of data accessed before the first
				   reuse of this value.  */
  struct mem_ref *next;		/* The next reference in the group.  */
  unsigned int uid;		/* Ref UID, used only for debugging.  */
  unsigned write_p : 1;		/* Is it a write?  */
  unsigned independent_p : 1;	/* True if the reference is independent of
				   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;	/* Should we really issue the prefetch?  */
  unsigned storent_p : 1;	/* True if we changed the store to a
				   nontemporal one.  */
};
/* Dumps information about memory reference.  */

static void
dump_mem_details (FILE *file, tree base, tree step,
		  HOST_WIDE_INT delta, bool write_p)
{
  fprintf (file, "(base ");
  print_generic_expr (file, base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
  else
    print_generic_expr (file, step, TDF_SLIM);
  fprintf (file, ")\n");
  fprintf (file, "  delta " HOST_WIDE_INT_PRINT_DEC "\n", delta);
  fprintf (file, "  %s\n\n", write_p ? "write" : "read");
}
/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "reference %u:%u (", ref->group->uid, ref->uid);
  print_generic_expr (file, ref->mem, TDF_SLIM);
  fprintf (file, ")\n");
}
/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  /* Global count for setting struct mem_ref_group->uid.  */
  static unsigned int last_mem_ref_group_uid = 0;

  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
	  && operand_equal_p ((*groups)->base, base, 0))
	return *groups;

      /* If step is an integer constant, keep the list of groups sorted
	 by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
	  && int_cst_value ((*groups)->step) < int_cst_value (step))
	break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->uid = ++last_mem_ref_group_uid;
  group->next = *groups;
  *groups = group;

  return group;
}
/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
	    HOST_WIDE_INT delta, bool write_p)
{
  unsigned int last_mem_ref_uid = 0;
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      last_mem_ref_uid = (*aref)->uid;

      /* A write reference need not be able to reuse a read prefetch,
	 or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
	  && write_p
	  && !(*aref)->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !write_p
	  && (*aref)->write_p)
	continue;

      if ((*aref)->delta == delta)
	return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;
  (*aref)->uid = last_mem_ref_uid + 1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_mem_ref (dump_file, *aref);

      fprintf (dump_file, " group %u ", group->uid);
      dump_mem_details (dump_file, group->base, group->step, delta,
			write_p);
    }
}
/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
	{
	  next_r = ref->next;
	  free (ref);
	}
      free (groups);
    }
}
/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;		/* Loop of the reference.  */
  gimple *stmt;			/* Statement of the reference.  */
  tree *step;			/* Step of the memory reference.  */
  HOST_WIDE_INT *delta;		/* Offset of the memory reference.  */
};
/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
  affine_iv iv;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
		  *index, &iv, true))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
	return false;
      imult = int_cst_value (stepsize);
      step = fold_build2 (MULT_EXPR, sizetype,
			  fold_convert (sizetype, step),
			  fold_convert (sizetype, stepsize));
      idelta *= imult;
    }

  if (*ar_data->step == NULL_TREE)
    *ar_data->step = step;
  else
    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
				  fold_convert (sizetype, *ar_data->step),
				  fold_convert (sizetype, step));
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}
/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
	     tree *step, HOST_WIDE_INT *delta,
	     gimple *stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = NULL_TREE;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
  if (TREE_CODE (ref) == REALPART_EXPR
      || TREE_CODE (ref) == IMAGPART_EXPR
      || (TREE_CODE (ref) == COMPONENT_REF
	  && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
    {
      if (TREE_CODE (ref) == IMAGPART_EXPR)
	*delta += int_size_in_bytes (TREE_TYPE (ref));
      ref = TREE_OPERAND (ref, 0);
    }

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
			      tree ref, bool write_p, gimple *stmt)
{
  tree base, step;
  HOST_WIDE_INT delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;
  /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
  if (step == NULL_TREE)
    return false;

  /* Stop if the address of BASE could not be taken.  */
  if (may_be_nonaddressable_p (base))
    return false;

  /* Limit non-constant step prefetching only to the innermost loops and
     only when the step is loop invariant in the entire loop nest.  */
  if (!cst_and_fits_in_hwi (step))
    {
      if (loop->inner != NULL)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory expression %p\n", (void *) ref);
	      print_generic_expr (dump_file, ref, TDF_SLIM);
	      fprintf (dump_file, ":");
	      dump_mem_details (dump_file, base, step, delta, write_p);
	      fprintf (dump_file,
		       "Ignoring %p, non-constant step prefetching is "
		       "limited to innermost loops\n",
		       (void *) ref);
	    }
	  return false;
	}
      else
	{
	  if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Memory expression %p\n", (void *) ref);
		  print_generic_expr (dump_file, ref, TDF_SLIM);
		  fprintf (dump_file, ":");
		  dump_mem_details (dump_file, base, step, delta, write_p);
		  fprintf (dump_file,
			   "Not prefetching, ignoring %p due to "
			   "loop variant step\n",
			   (void *) ref);
		}
	      return false;
	    }
	}
    }

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}
/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple *stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;
  *ref_count = 0;

  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
	continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
	    {
	      if (gimple_vuse (stmt)
		  || (is_gimple_call (stmt)
		      && !(gimple_call_flags (stmt) & ECF_CONST)))
		*no_other_refs = false;
	      continue;
	    }

	  if (! gimple_vuse (stmt))
	    continue;

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);

	  if (REFERENCE_CLASS_P (rhs))
	    {
	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
							      rhs, false, stmt);
	      *ref_count += 1;
	    }
	  if (REFERENCE_CLASS_P (lhs))
	    {
	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
							      lhs, true, stmt);
	      *ref_count += 1;
	    }
	}
    }
  free (body);

  return refs;
}
/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step;
  bool backward;

  /* If the step size is non constant, we cannot calculate prefetch_mod.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}
/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / (HOST_WIDE_INT) by;
  else
    return (x + (HOST_WIDE_INT) by - 1) / (HOST_WIDE_INT) by;
}
/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */

static bool
is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
			 HOST_WIDE_INT step, HOST_WIDE_INT delta,
			 unsigned HOST_WIDE_INT distinct_iters,
			 int align_unit)
{
  unsigned align, iter;
  int total_positions, miss_positions, max_allowed_miss_positions;
  int address1, address2, cache_line1, cache_line2;

  /* It always misses if delta is greater than or equal to the cache
     line size.  */
  if (delta >= (HOST_WIDE_INT) cache_line_size)
    return false;

  miss_positions = 0;
  total_positions = (cache_line_size / align_unit) * distinct_iters;
  max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;

  /* Iterate through all possible alignments of the first
     memory reference within its cache line.  */
  for (align = 0; align < cache_line_size; align += align_unit)

    /* Iterate through all distinct iterations.  */
    for (iter = 0; iter < distinct_iters; iter++)
      {
	address1 = align + step * iter;
	address2 = address1 + delta;
	cache_line1 = address1 / cache_line_size;
	cache_line2 = address2 / cache_line_size;
	if (cache_line1 != cache_line2)
	  {
	    miss_positions += 1;
	    if (miss_positions > max_allowed_miss_positions)
	      return false;
	  }
      }
  return true;
}
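
/* Worked example (illustrative, not from the original sources): for the
   references a[187*i] and a[187*i + 50] from the header comment, step is
   187, delta is 50 and the cache line size is 64.  The two addresses share
   a line only when the first falls in the first 64 - 50 = 14 bytes of its
   line, so the miss rate is 50/64, about 781 per thousand -- far above
   ACCEPTABLE_MISS_RATE, hence both references keep their own prefetches.  */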
/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
			  bool by_is_before)
{
  HOST_WIDE_INT step;
  bool backward;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
  HOST_WIDE_INT reduced_step;
  unsigned HOST_WIDE_INT reduced_prefetch_block;
  tree ref_type;
  int align_unit;

  /* If the step is non constant we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
	 former.  */
      if (by_is_before)
	ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
	 same cache line, prefetch just the first one.  */
      if (!by_is_before)
	return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
	  != ddown (by->delta, PREFETCH_BLOCK))
	return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
	return;

      /* Transform the data so that we may assume that the accesses
	 are forward.  */
      delta = -delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
	return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which that occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
	prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
  prefetch_block = PREFETCH_BLOCK;
  reduced_prefetch_block = prefetch_block;
  reduced_step = step;
  while ((reduced_step & 1) == 0
	 && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  prefetch_before = delta / step;
  delta %= step;
  ref_type = TREE_TYPE (ref->mem);
  align_unit = TYPE_ALIGN (ref_type) / 8;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
			       reduced_prefetch_block, align_unit))
    {
      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
	prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
			       reduced_prefetch_block, align_unit))
    {
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* The ref probably does not reuse by.  */
  return;
}
/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
	{
	  before = false;
	  continue;
	}

      if (!WRITE_CAN_USE_READ_PREFETCH
	  && ref->write_p
	  && !prune_by->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !ref->write_p
	  && prune_by->write_p)
	continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}
/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_mem_ref (dump_file, ref_pruned);

	  if (ref_pruned->prefetch_before == PREFETCH_ALL
	      && ref_pruned->prefetch_mod == 1)
	    fprintf (dump_file, " no restrictions");
	  else if (ref_pruned->prefetch_before == 0)
	    fprintf (dump_file, " do not prefetch");
	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
	    fprintf (dump_file, " prefetch once");
	  else
	    {
	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
		{
		  fprintf (dump_file, " prefetch before ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_before);
		}
	      if (ref_pruned->prefetch_mod != 1)
		{
		  fprintf (dump_file, " prefetch mod ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_mod);
		}
	    }
	  fprintf (dump_file, "\n");
	}
    }
}
/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}
/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* Do we want to issue prefetches for non-constant strides?  */
  if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Skipping non-constant step for reference %u:%u\n",
		 ref->group->uid, ref->uid);
      return false;
    }

  /* Some processors may have a hardware prefetcher that may conflict with
     prefetch hints for a range of strides.  Make sure we don't issue
     prefetches for such cases if the stride is within this particular
     range.  */
  if (cst_and_fits_in_hwi (ref->group->step)
      && abs_hwi (int_cst_value (ref->group->step))
	 < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
		 ") is less than the minimum required stride of %d\n",
		 ref->group->uid, ref->uid, int_cst_value (ref->group->step),
		 PREFETCH_MINIMUM_STRIDE);
      return false;
    }

  /* For now do not issue prefetches for only the first few iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Ignoring reference %u:%u due to prefetch_before\n",
		 ref->group->uid, ref->uid);
      return false;
    }

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Ignoring nontemporal store reference %u:%u\n",
		 ref->group->uid, ref->uid);
      return false;
    }

  return true;
}
/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
		     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
	     slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
	if (!should_issue_prefetch_p (ref))
	  continue;

	/* The loop is far from being sufficiently unrolled for this
	   prefetch.  Do not generate prefetch to avoid many redundant
	   prefetches.  */
	if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
	  continue;

	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
	   and we unroll the loop UNROLL_FACTOR times, we need to insert
	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
	   iteration.  */
	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			/ ref->prefetch_mod);
	prefetch_slots = n_prefetches * slots_per_prefetch;

	/* If more than half of the prefetches would be lost anyway, do not
	   issue the prefetch.  */
	if (2 * remaining_prefetch_slots < prefetch_slots)
	  continue;

	/* Stop prefetching if debug counter is activated.  */
	if (!dbg_cnt (prefetch))
	  continue;

	ref->issue_prefetch_p = true;
	if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, "Decided to issue prefetch for reference %u:%u\n",
		   ref->group->uid, ref->uid);

	if (remaining_prefetch_slots <= prefetch_slots)
	  return true;

	remaining_prefetch_slots -= prefetch_slots;
	any = true;
      }

  return any;
}
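
/* Illustrative arithmetic (not from the original sources): with AHEAD == 8
   and UNROLL_FACTOR == 4, slots_per_prefetch = (8 + 4 / 2) / 4 = 2, so a
   reference with prefetch_mod 1 needs n_prefetches = 4 prefetch
   instructions per unrolled body, consuming 4 * 2 = 8 of the
   SIMULTANEOUS_PREFETCHES slots.  */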
/* Return TRUE if no prefetch is going to be generated in the given
   GROUPS.  */

static bool
nothing_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	return false;

  return true;
}
/* Estimate the number of prefetches in the given GROUPS.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */

static int
estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
{
  struct mem_ref *ref;
  unsigned n_prefetches;
  int prefetch_count = 0;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			  / ref->prefetch_mod);
	  prefetch_count += n_prefetches;
	}

  return prefetch_count;
}
/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local, forward;
  gcall *prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for reference %u:%u.\n",
	     nontemporal ? " nontemporal" : "",
	     ref->group->uid, ref->uid);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
		  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
					true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = nontemporal ? integer_zero_node : integer_three_node;

  for (ap = 0; ap < n_prefetches; ap++)
    {
      if (cst_and_fits_in_hwi (ref->group->step))
	{
	  /* Determine the address to prefetch.  */
	  delta = (ahead + ap * ref->prefetch_mod) *
		   int_cst_value (ref->group->step);
	  addr = fold_build_pointer_plus_hwi (addr_base, delta);
	  addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
					   NULL, true, GSI_SAME_STMT);
	}
      else
	{
	  /* The step size is non-constant but loop-invariant.  We use the
	     heuristic of simply prefetching AHEAD iterations ahead.  */
	  forward = fold_build2 (MULT_EXPR, sizetype,
				 fold_convert (sizetype, ref->group->step),
				 fold_convert (sizetype, size_int (ahead)));
	  addr = fold_build_pointer_plus (addr_base, forward);
	  addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
					   NULL, true, GSI_SAME_STMT);
	}

      if (addr_base != addr
	  && TREE_CODE (addr_base) == SSA_NAME
	  && TREE_CODE (addr) == SSA_NAME)
	{
	  duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base));
	  /* As this isn't a plain copy we have to reset alignment
	     information.  */
	  if (SSA_NAME_PTR_INFO (addr))
	    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr));
	}

      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
				    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}
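
/* For illustration only (hypothetical source-level equivalent): for a
   constant-stride reference, the calls built above behave like

     __builtin_prefetch (&a[i + ahead * step], write_p, locality);

   with write_p 1 for writes and 0 for reads, and locality 3 for temporal
   data or 0 when the reference was classified as nontemporal.  */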
/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
		  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
	issue_prefetch_ref (ref, unroll_factor, ahead);
}
/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = optab_handler (storent_optab, mode);
  return code != CODE_FOR_nothing;
}
/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %u:%u as a nontemporal store.\n",
	     ref->group->uid, ref->uid);

  gimple_assign_set_nontemporal_move (ref->stmt, true);
  ref->storent_p = true;

  return true;
}
/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  edge exit;
  gcall *call;
  gimple_stmt_iterator bsi;
  unsigned i;

  FOR_EACH_VEC_ELT (exits, i, exit)
    {
      call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);

      if (!single_pred_p (exit->dest)
	  /* If possible, we prefer not to insert the fence on other paths
	     in cfg.  */
	  && !(exit->flags & EDGE_ABNORMAL))
	split_loop_exit_edge (exit);
      bsi = gsi_after_labels (exit->dest);

      gsi_insert_before (&bsi, call, GSI_NEW_STMT);
    }

  exits.release ();
  update_ssa (TODO_update_ssa_only_virtuals);
}
/* Returns true if we can use storent in loop, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue a mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      vec<edge> exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      FOR_EACH_VEC_ELT (exits, i, exit)
	if ((exit->flags & EDGE_ABNORMAL)
	    && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	  ret = false;

      exits.release ();
    }

  return ret;
}
/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
		      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}
/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
			 unsigned ninsns, struct tree_niter_desc *desc,
			 HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would be never entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  mod_constraint = ref->prefetch_mod;
	  nfactor = least_common_multiple (mod_constraint, factor);
	  if (nfactor <= upper_bound)
	    factor = nfactor;
	}

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}
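
/* Illustrative arithmetic (not from the original sources): with two
   references whose prefetch_mod values are 4 and 16 and an upper bound of
   32, the factor grows as lcm (1, 4) = 4 and then lcm (4, 16) = 16, so the
   loop is unrolled 16 times and each reference is prefetched exactly once
   per cache line.  */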
/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */

static unsigned
volume_of_references (struct mem_ref_group *refs)
{
  unsigned volume = 0;
  struct mem_ref_group *gr;
  struct mem_ref *ref;

  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	/* Almost always reuses another value?  */
	if (ref->prefetch_before != PREFETCH_ALL)
	  continue;

	/* If several iterations access the same cache line, use the size of
	   the line divided by this number.  Otherwise, a cache line is
	   accessed in each iteration.  TODO -- in the latter case, we should
	   take the size of the reference into account, rounding it up on cache
	   line size multiple.  */
	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
      }
  return volume;
}
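
/* Illustrative arithmetic (not from the original sources, assuming a
   64-byte L1 line): a stride-1 char access with prefetch_mod 64 contributes
   64 / 64 = 1 byte per iteration, while a reference that touches a new
   cache line every iteration contributes the full L1_CACHE_LINE_SIZE.  */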
/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */

static unsigned
volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
{
  unsigned i;

  for (i = 0; i < n; i++)
    if (vec[i] != 0)
      break;

  if (i == n)
    return 0;

  gcc_assert (vec[i] > 0);

  /* We ignore the parts of the distance vector in subloops, since usually
     the numbers of iterations are much smaller.  */
  return loop_sizes[i] * vec[i];
}
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */

static void
add_subscript_strides (tree access_fn, unsigned stride,
		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
{
  struct loop *aloop;
  tree step;
  HOST_WIDE_INT astep;
  unsigned min_depth = loop_depth (loop) - n;

  while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
    {
      aloop = get_chrec_loop (access_fn);
      step = CHREC_RIGHT (access_fn);
      access_fn = CHREC_LEFT (access_fn);

      if ((unsigned) loop_depth (aloop) <= min_depth)
	continue;

      if (tree_fits_shwi_p (step))
	astep = tree_to_shwi (step);
      else
	astep = L1_CACHE_LINE_SIZE;

      strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
    }
}
/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */

static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
		     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  vec<tree> access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
	 use (a[j][i]);
     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  FOR_EACH_VEC_ELT (access_fns, i, access_fn)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
	 know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
	ref = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
	  if (tree_fits_uhwi_p (stride))
	    astride = tree_to_uhwi (stride);
	  else
	    astride = L1_CACHE_LINE_SIZE;

	  ref = TREE_OPERAND (ref, 0);
	}
      else
	astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ? -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
	{
	  ret = loop_sizes[i];
	  break;
	}
    }

  free (strides);
  return ret;
}
/* Determines the distance till the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  Return false if the analysis fails.  */

static bool
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
			   bool no_other_refs)
{
  struct loop *nest, *aloop;
  vec<data_reference_p> datarefs = vNULL;
  vec<ddr_p> dependences = vNULL;
  struct mem_ref_group *gr;
  struct mem_ref *ref, *refb;
  auto_vec<loop_p> vloops;
  unsigned *loop_data_size;
  unsigned i, j, n;
  unsigned volume, dist, adist;
  HOST_WIDE_INT vol;
  data_reference_p dr;
  ddr_p dep;

  if (loop->inner)
    return true;

  /* Find the outermost loop of the loop nest of loop (we require that
     there are no sibling loops inside the nest).  */
  nest = loop;
  while (1)
    {
      aloop = loop_outer (nest);

      if (aloop == current_loops->tree_root
	  || aloop->inner->next)
	break;

      nest = aloop;
    }

  /* For each loop, determine the amount of data accessed in each iteration.
     We use this to estimate whether the reference is evicted from the
     cache before its reuse.  */
  find_loop_nest (nest, &vloops);
  n = vloops.length ();
  loop_data_size = XNEWVEC (unsigned, n);
  volume = volume_of_references (refs);
  i = n;
  while (i-- != 0)
    {
      loop_data_size[i] = volume;
      /* Bound the volume by the L2 cache size, since above this bound,
	 all dependence distances are equivalent.  */
      if (volume > L2_CACHE_SIZE_BYTES)
	continue;

      aloop = vloops[i];
      vol = estimated_stmt_executions_int (aloop);
      if (vol == -1)
	vol = expected_loop_iterations (aloop);
      volume *= vol;
    }

  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristics to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	dr = create_data_ref (loop_preheader_edge (nest),
			      loop_containing_stmt (ref->stmt),
			      ref->mem, ref->stmt, !ref->write_p, false);

	if (dr)
	  {
	    ref->reuse_distance = volume;
	    dr->aux = ref;
	    datarefs.safe_push (dr);
	  }
	else
	  no_other_refs = false;
      }

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = (struct mem_ref *) dr->aux;
      if (ref->reuse_distance > dist)
	ref->reuse_distance = dist;

      if (no_other_refs)
	ref->independent_p = true;
    }

  if (!compute_all_dependences (datarefs, &dependences, vloops, true))
    return false;

  FOR_EACH_VEC_ELT (dependences, i, dep)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
	continue;

      ref = (struct mem_ref *) DDR_A (dep)->aux;
      refb = (struct mem_ref *) DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
	  || DDR_COULD_BE_INDEPENDENT_P (dep)
	  || DDR_NUM_DIST_VECTS (dep) == 0)
	{
	  /* If the dependence cannot be analyzed, assume that there might be
	     a reuse.  */
	  dist = 0;

	  ref->independent_p = false;
	  refb->independent_p = false;
	}
      else
	{
	  /* The distance vectors are normalized to be always lexicographically
	     positive, hence we cannot tell just from them whether DDR_A comes
	     before DDR_B or vice versa.  However, it is not important,
	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
	     DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
	     in cache (and marking it as nontemporal would not affect
	     anything).  */

	  dist = volume;
	  for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
	    {
	      adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
					     loop_data_size, n);

	      /* If this is a dependence in the innermost loop (i.e., the
		 distances in all superloops are zero) and it is not
		 the trivial self-dependence with distance zero, record that
		 the references are not completely independent.  */
	      if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
		  && (ref != refb
		      || DDR_DIST_VECT (dep, j)[n-1] != 0))
		{
		  ref->independent_p = false;
		  refb->independent_p = false;
		}

	      /* Ignore accesses closer than
		 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
		 so that we use nontemporal prefetches e.g. if single memory
		 location is accessed several times in a single iteration of
		 the loop.  */
	      if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
		continue;

	      if (adist < dist)
		dist = adist;
	    }
	}

      if (ref->reuse_distance > dist)
	ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
	refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
	for (ref = gr->refs; ref; ref = ref->next)
	  fprintf (dump_file, " reference %u:%u distance %u\n",
		   ref->group->uid, ref->uid, ref->reuse_distance);
    }

  return true;
}
/* Determine whether or not the trip count to ahead ratio is too small based
   on profitability consideration.
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count.  */

static bool
trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
{
  /* Assume trip count to ahead ratio is big enough if the trip count could not
     be estimated at compile time.  */
  if (est_niter < 0)
    return false;

  if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- loop estimated to roll only %d times\n",
		 (int) est_niter);
      return true;
    }

  return false;
}
/* Determine whether or not the number of memory references in the loop is
   reasonable based on the profitability and compilation time considerations.
   NINSNS: estimated number of instructions in the loop,
   MEM_REF_COUNT: total number of memory references in the loop.  */

static bool
mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
{
  int insn_to_mem_ratio;

  if (mem_ref_count == 0)
    return false;

  /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
     (compute_all_dependences) have high costs based on quadratic complexity.
     To avoid huge compilation time, we give up prefetching if mem_ref_count
     is too large.  */
  if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
    return false;

  /* Prefetching improves performance by overlapping cache missing
     memory accesses with CPU operations.  If the loop does not have
     enough CPU operations to overlap with memory operations, prefetching
     won't give a significant benefit.  One approximate way of checking
     this is to require the ratio of instructions to memory references to
     be above a certain limit.  This approximation works well in practice.
     TODO: Implement a more precise computation by estimating the time
     for each CPU or memory op in the loop.  Time estimates for memory ops
     should account for cache misses.  */
  insn_to_mem_ratio = ninsns / mem_ref_count;

  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
		 insn_to_mem_ratio);
      return false;
    }

  return true;
}
/* Determine whether or not the instruction to prefetch ratio in the loop is
   too small based on the profitability consideration.
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   UNROLL_FACTOR: the factor to unroll the loop if prefetching.  */

static bool
insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
				    unsigned unroll_factor)
{
  int insn_to_prefetch_ratio;

  /* Prefetching most likely causes performance degradation when the instruction
     to prefetch ratio is too small.  Too many prefetch instructions in a loop
     may reduce the I-cache performance.
     (unroll_factor * ninsns) is used to estimate the number of instructions in
     the unrolled loop.  This implementation is a bit simplistic -- the number
     of issued prefetch instructions is also affected by unrolling.  So,
     prefetch_mod and the unroll factor should be taken into account when
     determining prefetch_count.  Also, the number of insns of the unrolled
     loop will usually be significantly smaller than the number of insns of the
     original loop * unroll_factor (at least the induction variable increases
     and the exit branches will get eliminated), so it might be better to use
     tree_estimate_loop_size + estimated_unrolled_size.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
		 insn_to_prefetch_ratio);
      return true;
    }

  return false;
}
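
/* Illustrative arithmetic (not from the original sources): a loop of 20
   insns unrolled 4 times with 10 prefetches gives an instruction to
   prefetch ratio of (4 * 20) / 10 = 8; whether that passes depends on the
   MIN_INSN_TO_PREFETCH_RATIO parameter.  */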
/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;
  unsigned prefetch_count;
  unsigned mem_ref_count;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  ignored (cold area)\n");
      return false;
    }

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  if (time == 0)
    return false;

  ahead = (PREFETCH_LATENCY + time - 1) / time;
  est_niter = estimated_stmt_executions_int (loop);
  if (est_niter == -1)
    est_niter = likely_max_stmt_executions_int (loop);

  /* Prefetching is not likely to be profitable if the trip count to ahead
     ratio is too small.  */
  if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
    return false;

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);

  /* Give up prefetching if the number of memory references in the
     loop is not reasonable based on profitability and compilation time
     considerations.  */
  if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
    goto fail;

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (nothing_to_prefetch_p (refs))
    goto fail;

  if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
    goto fail;

  /* Step 3: determine unroll factor.  */
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
					   est_niter);

  /* Estimate prefetch count for the unrolled loop.  */
  prefetch_count = estimate_prefetch_count (refs, unroll_factor);
  if (prefetch_count == 0)
    goto fail;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
	     HOST_WIDE_INT_PRINT_DEC "\n"
	     "insn count %d, mem ref count %d, prefetch count %d\n",
	     ahead, unroll_factor, est_niter,
	     ninsns, mem_ref_count, prefetch_count);

  /* Prefetching is not likely to be profitable if the instruction to prefetch
     ratio is too small.  */
  if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
					  unroll_factor))
    goto fail;

  mark_nontemporal_stores (loop, refs);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
			single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}
/* Issue prefetch instructions for array references in loops.  */

static unsigned int
tree_ssa_prefetch_arrays (void)
{
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!targetm.have_prefetch ()
      /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
	 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
	 of processor costs and i486 does not have prefetch, but
	 -march=pentium4 causes targetm.have_prefetch to be true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, " simultaneous prefetches: %d\n",
	       SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
	       MIN_INSN_TO_PREFETCH_RATIO);
      fprintf (dump_file, " min insn-to-mem ratio: %d \n",
	       PREFETCH_MIN_INSN_TO_MEM_RATIO);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
    {
      tree type = build_function_type_list (void_type_node,
					    const_ptr_type_node, NULL_TREE);
      tree decl = add_builtin_function ("__builtin_prefetch", type,
					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
					NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
    }

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}
const pass_data pass_data_loop_prefetch =
{
  GIMPLE_PASS, /* type */
  "aprefetch", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_PREFETCH, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_loop_prefetch : public gimple_opt_pass
{
public:
  pass_loop_prefetch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
  virtual unsigned int execute (function *);

}; // class pass_loop_prefetch

unsigned int
pass_loop_prefetch::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  if ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) != 0)
    {
      static bool warned = false;

      if (!warned)
	{
	  warning (OPT_Wdisabled_optimization,
		   "%<l1-cache-size%> parameter is not a power of two %d",
		   PREFETCH_BLOCK);
	  warned = true;
	}
      return 0;
    }

  return tree_ssa_prefetch_arrays ();
}

gimple_opt_pass *
make_pass_loop_prefetch (gcc::context *ctxt)
{
  return new pass_loop_prefetch (ctxt);
}
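
/* Usage note (not part of the original file): this pass runs when
   -fprefetch-loop-arrays is enabled, e.g.

     gcc -O2 -fprefetch-loop-arrays -fdump-tree-aprefetch-details foo.c

   and the magic constants above are backed by --param knobs such as
   simultaneous-prefetches, prefetch-latency, l1-cache-line-size,
   l1-cache-size, l2-cache-size and min-insn-to-prefetch-ratio.  */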