2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
26 #include "basic-block.h"
27 #include "tree-pretty-print.h"
28 #include "tree-flow.h"
30 #include "tree-pass.h"
31 #include "insn-config.h"
33 #include "tree-chrec.h"
34 #include "tree-scalar-evolution.h"
35 #include "diagnostic-core.h"
37 #include "langhooks.h"
38 #include "tree-inline.h"
39 #include "tree-data-ref.h"
42 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
43 between the GIMPLE and RTL worlds. */
48 /* This pass inserts prefetch instructions to optimize cache usage during
49 accesses to arrays in loops. It processes loops sequentially and:
51 1) Gathers all memory references in the single loop.
52 2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
54 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
55 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
56 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
57 iterations of the loop that are zero modulo PREFETCH_MOD). For example
58 (assuming cache line size is 64 bytes, char has size 1 byte and there
59 is no hardware sequential prefetch):
62 for (i = 0; i < max; i++)
69 a[187*i + 50] = ...; (5)
72 (0) obviously has PREFETCH_BEFORE 1
73 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
74 location 64 iterations before it, and PREFETCH_MOD 64 (since
75 it hits the same cache line otherwise).
76 (2) has PREFETCH_MOD 64
77 (3) has PREFETCH_MOD 4
78 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
79 the cache line accessed by (5) is the same with probability only
81 (5) has PREFETCH_MOD 1 as well.
83 Additionally, we use data dependence analysis to determine for each
84 reference the distance till the first reuse; this information is used
85 to determine the temporality of the issued prefetch instruction.
87 3) We determine how much ahead we need to prefetch. The number of
88 iterations needed is time to fetch / time spent in one iteration of
89 the loop. The problem is that we do not know either of these values,
90 so we just make a heuristic guess based on a magic (possibly)
91 target-specific constant and size of the loop.
93 4) Determine which of the references we prefetch. We take into account
94 that there is a maximum number of simultaneous prefetches (provided
      by the machine description).  We issue as many prefetches as possible
      while still staying within this bound (starting with those with the
      lowest prefetch_mod, since they are responsible for most of the cache
100 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
101 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
102 prefetching nonaccessed memory.
103 TODO -- actually implement peeling.
105 6) We actually emit the prefetch instructions. ??? Perhaps emit the
106 prefetch instructions with guards in cases where 5) was not sufficient
107 to satisfy the constraints?
109 A cost model is implemented to determine whether or not prefetching is
110 profitable for a given loop. The cost model has three heuristics:
112 1. Function trip_count_to_ahead_ratio_too_small_p implements a
113 heuristic that determines whether or not the loop has too few
114 iterations (compared to ahead). Prefetching is not likely to be
115 beneficial if the trip count to ahead ratio is below a certain
118 2. Function mem_ref_count_reasonable_p implements a heuristic that
119 determines whether the given loop has enough CPU ops that can be
120 overlapped with cache missing memory ops. If not, the loop
121 won't benefit from prefetching. In the implementation,
122 prefetching is not considered beneficial if the ratio between
123 the instruction count and the mem ref count is below a certain
126 3. Function insn_to_prefetch_ratio_too_small_p implements a
127 heuristic that disables prefetching in a loop if the prefetching
128 cost is above a certain limit. The relative prefetching cost is
129 estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).
132 The limits used in these heuristics are defined as parameters with
133 reasonable default values. Machine-specific default values will be
137 -- write and use more general reuse analysis (that could be also used
138 in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by the user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user has put their own prefetches)
142 -- we assume cache line size alignment of arrays; this could be
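/* As an illustration (a sketch only -- the actual prefetch distance, unroll
   factor and temporal locality hint depend on the target parameters defined
   below, and the function name is just a placeholder), a simple reduction
   loop such as

     double sum_array (const double *a, int n)
     {
       double sum = 0;
       int i;
       for (i = 0; i < n; i++)
         sum += a[i];
       return sum;
     }

   is, assuming 64-byte cache lines and a prefetch-ahead distance of 64
   iterations, transformed by steps 1)-6) above roughly as if it had been
   written as

     double sum_array (const double *a, int n)
     {
       double sum = 0;
       int i;
       for (i = 0; i < n; i += 8)
         {
           __builtin_prefetch (&a[i + 64], 0, 3);
           sum += a[i];     sum += a[i + 1];
           sum += a[i + 2]; sum += a[i + 3];
           sum += a[i + 4]; sum += a[i + 5];
           sum += a[i + 6]; sum += a[i + 7];
         }
       return sum;
     }

   i.e. the loop is unrolled so that a single prefetch covers one cache line
   of data and the prefetched address lies a fixed number of iterations ahead
   of the current one; the epilogue needed when N is not a multiple of the
   unroll factor is omitted here.  */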
145 /* Magic constants follow. These should be replaced by machine specific
148 /* True if write can be prefetched by a read prefetch. */
150 #ifndef WRITE_CAN_USE_READ_PREFETCH
151 #define WRITE_CAN_USE_READ_PREFETCH 1
154 /* True if read can be prefetched by a write prefetch. */
156 #ifndef READ_CAN_USE_WRITE_PREFETCH
157 #define READ_CAN_USE_WRITE_PREFETCH 0
160 /* The size of the block loaded by a single prefetch. Usually, this is
161 the same as cache line size (at the moment, we only consider one level
162 of cache hierarchy). */
164 #ifndef PREFETCH_BLOCK
165 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
/* Do we have forward hardware sequential prefetching?  */
170 #ifndef HAVE_FORWARD_PREFETCH
171 #define HAVE_FORWARD_PREFETCH 0
/* Do we have backward hardware sequential prefetching?  */
176 #ifndef HAVE_BACKWARD_PREFETCH
177 #define HAVE_BACKWARD_PREFETCH 0
180 /* In some cases we are only able to determine that there is a certain
181 probability that the two accesses hit the same cache line. In this
182 case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
185 #ifndef ACCEPTABLE_MISS_RATE
186 #define ACCEPTABLE_MISS_RATE 50
189 #ifndef HAVE_prefetch
190 #define HAVE_prefetch 0
193 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
194 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
196 /* We consider a memory access nontemporal if it is not reused sooner than
197 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
198 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
200 is accessed several times in a single iteration of the loop. */
201 #define NONTEMPORAL_FRACTION 16
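/* For instance, assuming (purely for illustration) a 32 kB L1 cache and a
   512 kB L2 cache, a reference is treated as nontemporal when more than
   512 kB of other data is accessed between two of its uses, while reuses
   closer than 32 kB / 16 = 2 kB are ignored when computing that distance.  */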
203 /* In case we have to emit a memory fence instruction after the loop that
204 uses nontemporal stores, this defines the builtin to use. */
206 #ifndef FENCE_FOLLOWING_MOVNT
207 #define FENCE_FOLLOWING_MOVNT NULL_TREE
210 /* It is not profitable to prefetch when the trip count is not at
211 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
212 For example, in a loop with a prefetch ahead distance of 10,
213 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
214 profitable to prefetch when the trip count is greater or equal to
215 40. In that case, 30 out of the 40 iterations will benefit from
218 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
219 #define TRIP_COUNT_TO_AHEAD_RATIO 4
/* A group of references between which reuse may occur.  */
struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};
232 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
234 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
236 /* Do not generate a prefetch if the unroll factor is significantly less
237 than what is required by the prefetch. This is to avoid redundant
238 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
239 2, prefetching requires unrolling the loop 16 times, but
240 the loop is actually unrolled twice. In this case (ratio = 8),
241 prefetching is not likely to be beneficial. */
243 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
244 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
247 /* Some of the prefetch computations have quadratic complexity. We want to
   avoid huge compile times and, therefore, want to limit the number of
249 memory references per loop where we consider prefetching. */
251 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
252 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;/* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};
/* Dumps information about memory reference.  */

static void
dump_mem_details (FILE *file, tree base, tree step,
                  HOST_WIDE_INT delta, bool write_p)
{
  fprintf (file, "(base ");
  print_generic_expr (file, base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
  else
    print_generic_expr (file, step, TDF_TREE);
  fprintf (file, ")\n");
  fprintf (file, " delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
  fprintf (file, "\n");
  fprintf (file, " %s\n", write_p ? "write" : "read");
  fprintf (file, "\n");
}
/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);
  fprintf (file, " group %p ", (void *) ref->group);
  dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
                    ref->write_p);
}
/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
          && operand_equal_p ((*groups)->base, base, 0))
        return *groups;

      /* If step is an integer constant, keep the list of groups sorted
         by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
          && int_cst_value ((*groups)->step) < int_cst_value (step))
        break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}
344 /* Records a memory reference MEM in GROUP with offset DELTA and write status
345 WRITE_P. The reference occurs in statement STMT. */
348 record_ref (struct mem_ref_group
*group
, gimple stmt
, tree mem
,
349 HOST_WIDE_INT delta
, bool write_p
)
351 struct mem_ref
**aref
;
353 /* Do not record the same address twice. */
354 for (aref
= &group
->refs
; *aref
; aref
= &(*aref
)->next
)
      /* It need not be possible for a write reference to reuse the read
         prefetch, or vice versa.  */
358 if (!WRITE_CAN_USE_READ_PREFETCH
360 && !(*aref
)->write_p
)
362 if (!READ_CAN_USE_WRITE_PREFETCH
367 if ((*aref
)->delta
== delta
)
371 (*aref
) = XNEW (struct mem_ref
);
372 (*aref
)->stmt
= stmt
;
374 (*aref
)->delta
= delta
;
375 (*aref
)->write_p
= write_p
;
376 (*aref
)->prefetch_before
= PREFETCH_ALL
;
377 (*aref
)->prefetch_mod
= 1;
378 (*aref
)->reuse_distance
= 0;
379 (*aref
)->issue_prefetch_p
= false;
380 (*aref
)->group
= group
;
381 (*aref
)->next
= NULL
;
382 (*aref
)->independent_p
= false;
383 (*aref
)->storent_p
= false;
385 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
386 dump_mem_ref (dump_file
, *aref
);
389 /* Release memory references in GROUPS. */
392 release_mem_refs (struct mem_ref_group
*groups
)
394 struct mem_ref_group
*next_g
;
395 struct mem_ref
*ref
, *next_r
;
397 for (; groups
; groups
= next_g
)
399 next_g
= groups
->next
;
400 for (ref
= groups
->refs
; ref
; ref
= next_r
)
/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  gimple stmt;                  /* Statement of the reference.  */
  tree *step;                   /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};
419 /* Analyzes a single INDEX of a memory reference to obtain information
420 described at analyze_ref. Callback for for_each_index. */
423 idx_analyze_ref (tree base
, tree
*index
, void *data
)
425 struct ar_data
*ar_data
= (struct ar_data
*) data
;
426 tree ibase
, step
, stepsize
;
427 HOST_WIDE_INT idelta
= 0, imult
= 1;
430 if (!simple_iv (ar_data
->loop
, loop_containing_stmt (ar_data
->stmt
),
436 if (TREE_CODE (ibase
) == POINTER_PLUS_EXPR
437 && cst_and_fits_in_hwi (TREE_OPERAND (ibase
, 1)))
439 idelta
= int_cst_value (TREE_OPERAND (ibase
, 1));
440 ibase
= TREE_OPERAND (ibase
, 0);
442 if (cst_and_fits_in_hwi (ibase
))
444 idelta
+= int_cst_value (ibase
);
445 ibase
= build_int_cst (TREE_TYPE (ibase
), 0);
448 if (TREE_CODE (base
) == ARRAY_REF
)
450 stepsize
= array_ref_element_size (base
);
451 if (!cst_and_fits_in_hwi (stepsize
))
453 imult
= int_cst_value (stepsize
);
454 step
= fold_build2 (MULT_EXPR
, sizetype
,
455 fold_convert (sizetype
, step
),
456 fold_convert (sizetype
, stepsize
));
460 if (*ar_data
->step
== NULL_TREE
)
461 *ar_data
->step
= step
;
463 *ar_data
->step
= fold_build2 (PLUS_EXPR
, sizetype
,
464 fold_convert (sizetype
, *ar_data
->step
),
465 fold_convert (sizetype
, step
));
466 *ar_data
->delta
+= idelta
;
472 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
473 STEP are integer constants and iter is number of iterations of LOOP. The
474 reference occurs in statement STMT. Strips nonaddressable component
475 references from REF_P. */
478 analyze_ref (struct loop
*loop
, tree
*ref_p
, tree
*base
,
479 tree
*step
, HOST_WIDE_INT
*delta
,
482 struct ar_data ar_data
;
484 HOST_WIDE_INT bit_offset
;
490 /* First strip off the component references. Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
492 they can have the same base. */
493 if (TREE_CODE (ref
) == REALPART_EXPR
494 || TREE_CODE (ref
) == IMAGPART_EXPR
495 || (TREE_CODE (ref
) == COMPONENT_REF
496 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref
, 1))))
498 if (TREE_CODE (ref
) == IMAGPART_EXPR
)
499 *delta
+= int_size_in_bytes (TREE_TYPE (ref
));
500 ref
= TREE_OPERAND (ref
, 0);
505 for (; TREE_CODE (ref
) == COMPONENT_REF
; ref
= TREE_OPERAND (ref
, 0))
507 off
= DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref
, 1));
508 bit_offset
= TREE_INT_CST_LOW (off
);
509 gcc_assert (bit_offset
% BITS_PER_UNIT
== 0);
511 *delta
+= bit_offset
/ BITS_PER_UNIT
;
514 *base
= unshare_expr (ref
);
518 ar_data
.delta
= delta
;
519 return for_each_index (base
, idx_analyze_ref
, &ar_data
);
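/* For instance (a hypothetical reference), for a[4 * i + 3] where A is an
   array of 4-byte integers and I is the induction variable of LOOP, the
   analysis above would produce STEP = 16 and DELTA = 12, i.e. the address
   is modeled as &a[0] + 16 * iter + 12.  */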
522 /* Record a memory reference REF to the list REFS. The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
524 reference was recorded, false otherwise. */
527 gather_memory_references_ref (struct loop
*loop
, struct mem_ref_group
**refs
,
528 tree ref
, bool write_p
, gimple stmt
)
532 struct mem_ref_group
*agrp
;
534 if (get_base_address (ref
) == NULL
)
537 if (!analyze_ref (loop
, &ref
, &base
, &step
, &delta
, stmt
))
539 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
540 if (step
== NULL_TREE
)
543 /* Stop if the address of BASE could not be taken. */
544 if (may_be_nonaddressable_p (base
))
547 /* Limit non-constant step prefetching only to the innermost loops and
548 only when the step is loop invariant in the entire loop nest. */
549 if (!cst_and_fits_in_hwi (step
))
551 if (loop
->inner
!= NULL
)
553 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
555 fprintf (dump_file
, "Memory expression %p\n",(void *) ref
);
556 print_generic_expr (dump_file
, ref
, TDF_TREE
);
557 fprintf (dump_file
,":");
558 dump_mem_details( dump_file
, base
, step
, delta
, write_p
);
560 "Ignoring %p, non-constant step prefetching is "
561 "limited to inner most loops \n",
568 if (!expr_invariant_in_loop_p (loop_outermost (loop
), step
))
570 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
572 fprintf (dump_file
, "Memory expression %p\n",(void *) ref
);
573 print_generic_expr (dump_file
, ref
, TDF_TREE
);
574 fprintf (dump_file
,":");
575 dump_mem_details(dump_file
, base
, step
, delta
, write_p
);
577 "Not prefetching, ignoring %p due to "
578 "loop variant step\n",
586 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
587 are integer constants. */
588 agrp
= find_or_create_group (refs
, base
, step
);
589 record_ref (agrp
, stmt
, ref
, delta
, write_p
);
594 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
595 true if there are no other memory references inside the loop. */
597 static struct mem_ref_group
*
598 gather_memory_references (struct loop
*loop
, bool *no_other_refs
, unsigned *ref_count
)
600 basic_block
*body
= get_loop_body_in_dom_order (loop
);
603 gimple_stmt_iterator bsi
;
606 struct mem_ref_group
*refs
= NULL
;
608 *no_other_refs
= true;
611 /* Scan the loop body in order, so that the former references precede the
613 for (i
= 0; i
< loop
->num_nodes
; i
++)
616 if (bb
->loop_father
!= loop
)
619 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
621 stmt
= gsi_stmt (bsi
);
623 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
625 if (gimple_vuse (stmt
)
626 || (is_gimple_call (stmt
)
627 && !(gimple_call_flags (stmt
) & ECF_CONST
)))
628 *no_other_refs
= false;
632 lhs
= gimple_assign_lhs (stmt
);
633 rhs
= gimple_assign_rhs1 (stmt
);
635 if (REFERENCE_CLASS_P (rhs
))
637 *no_other_refs
&= gather_memory_references_ref (loop
, &refs
,
641 if (REFERENCE_CLASS_P (lhs
))
643 *no_other_refs
&= gather_memory_references_ref (loop
, &refs
,
/* Prune the prefetch candidate REF using its self-reuse.  */
657 prune_ref_by_self_reuse (struct mem_ref
*ref
)
  /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
663 if (!cst_and_fits_in_hwi (ref
->group
->step
))
666 step
= int_cst_value (ref
->group
->step
);
672 /* Prefetch references to invariant address just once. */
673 ref
->prefetch_before
= 1;
680 if (step
> PREFETCH_BLOCK
)
683 if ((backward
&& HAVE_BACKWARD_PREFETCH
)
684 || (!backward
&& HAVE_FORWARD_PREFETCH
))
686 ref
->prefetch_before
= 1;
690 ref
->prefetch_mod
= PREFETCH_BLOCK
/ step
;
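/* For example, assuming a PREFETCH_BLOCK of 64 bytes and no hardware
   sequential prefetch: a reference with a constant step of 4 bytes gets
   prefetch_mod = 64 / 4 = 16, so a single prefetch serves 16 consecutive
   iterations; a reference with an invariant address gets prefetch_before = 1;
   and a reference whose step exceeds 64 bytes keeps the default prefetch_mod
   of 1, since every iteration touches a different cache line.  */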
693 /* Divides X by BY, rounding down. */
696 ddown (HOST_WIDE_INT x
, unsigned HOST_WIDE_INT by
)
703 return (x
+ by
- 1) / by
;
706 /* Given a CACHE_LINE_SIZE and two inductive memory references
707 with a common STEP greater than CACHE_LINE_SIZE and an address
708 difference DELTA, compute the probability that they will fall
709 in different cache lines. Return true if the computed miss rate
710 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
711 number of distinct iterations after which the pattern repeats itself.
712 ALIGN_UNIT is the unit of alignment in bytes. */
715 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size
,
716 HOST_WIDE_INT step
, HOST_WIDE_INT delta
,
717 unsigned HOST_WIDE_INT distinct_iters
,
720 unsigned align
, iter
;
721 int total_positions
, miss_positions
, max_allowed_miss_positions
;
722 int address1
, address2
, cache_line1
, cache_line2
;
724 /* It always misses if delta is greater than or equal to the cache
726 if (delta
>= (HOST_WIDE_INT
) cache_line_size
)
730 total_positions
= (cache_line_size
/ align_unit
) * distinct_iters
;
731 max_allowed_miss_positions
= (ACCEPTABLE_MISS_RATE
* total_positions
) / 1000;
733 /* Iterate through all possible alignments of the first
734 memory reference within its cache line. */
735 for (align
= 0; align
< cache_line_size
; align
+= align_unit
)
737 /* Iterate through all distinct iterations. */
738 for (iter
= 0; iter
< distinct_iters
; iter
++)
740 address1
= align
+ step
* iter
;
741 address2
= address1
+ delta
;
742 cache_line1
= address1
/ cache_line_size
;
743 cache_line2
= address2
/ cache_line_size
;
744 if (cache_line1
!= cache_line2
)
747 if (miss_positions
> max_allowed_miss_positions
)
754 /* Prune the prefetch candidate REF using the reuse with BY.
755 If BY_IS_BEFORE is true, BY is before REF in the loop. */
758 prune_ref_by_group_reuse (struct mem_ref
*ref
, struct mem_ref
*by
,
763 HOST_WIDE_INT delta_r
= ref
->delta
, delta_b
= by
->delta
;
764 HOST_WIDE_INT delta
= delta_b
- delta_r
;
765 HOST_WIDE_INT hit_from
;
766 unsigned HOST_WIDE_INT prefetch_before
, prefetch_block
;
767 HOST_WIDE_INT reduced_step
;
768 unsigned HOST_WIDE_INT reduced_prefetch_block
;
  /* If the step is non-constant, we cannot calculate prefetch_before.  */
773 if (!cst_and_fits_in_hwi (ref
->group
->step
)) {
777 step
= int_cst_value (ref
->group
->step
);
      /* If the references have the same address, only prefetch the
787 ref
->prefetch_before
= 0;
794 /* If the reference addresses are invariant and fall into the
795 same cache line, prefetch just the first one. */
799 if (ddown (ref
->delta
, PREFETCH_BLOCK
)
800 != ddown (by
->delta
, PREFETCH_BLOCK
))
803 ref
->prefetch_before
= 0;
807 /* Only prune the reference that is behind in the array. */
813 /* Transform the data so that we may assume that the accesses
817 delta_r
= PREFETCH_BLOCK
- 1 - delta_r
;
818 delta_b
= PREFETCH_BLOCK
- 1 - delta_b
;
826 /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which it occurs are from
830 if (step
<= PREFETCH_BLOCK
)
832 /* The accesses are sure to meet. Let us check when. */
833 hit_from
= ddown (delta_b
, PREFETCH_BLOCK
) * PREFETCH_BLOCK
;
834 prefetch_before
= (hit_from
- delta_r
+ step
- 1) / step
;
836 /* Do not reduce prefetch_before if we meet beyond cache size. */
837 if (prefetch_before
> absu_hwi (L2_CACHE_SIZE_BYTES
/ step
))
838 prefetch_before
= PREFETCH_ALL
;
839 if (prefetch_before
< ref
->prefetch_before
)
840 ref
->prefetch_before
= prefetch_before
;
845 /* A more complicated case with step > prefetch_block. First reduce
846 the ratio between the step and the cache line size to its simplest
847 terms. The resulting denominator will then represent the number of
848 distinct iterations after which each address will go back to its
849 initial location within the cache line. This computation assumes
850 that PREFETCH_BLOCK is a power of two. */
851 prefetch_block
= PREFETCH_BLOCK
;
852 reduced_prefetch_block
= prefetch_block
;
854 while ((reduced_step
& 1) == 0
855 && reduced_prefetch_block
> 1)
858 reduced_prefetch_block
>>= 1;
861 prefetch_before
= delta
/ step
;
863 ref_type
= TREE_TYPE (ref
->mem
);
864 align_unit
= TYPE_ALIGN (ref_type
) / 8;
865 if (is_miss_rate_acceptable (prefetch_block
, step
, delta
,
866 reduced_prefetch_block
, align_unit
))
868 /* Do not reduce prefetch_before if we meet beyond cache size. */
869 if (prefetch_before
> L2_CACHE_SIZE_BYTES
/ PREFETCH_BLOCK
)
870 prefetch_before
= PREFETCH_ALL
;
871 if (prefetch_before
< ref
->prefetch_before
)
872 ref
->prefetch_before
= prefetch_before
;
877 /* Try also the following iteration. */
879 delta
= step
- delta
;
880 if (is_miss_rate_acceptable (prefetch_block
, step
, delta
,
881 reduced_prefetch_block
, align_unit
))
883 if (prefetch_before
< ref
->prefetch_before
)
884 ref
->prefetch_before
= prefetch_before
;
  /* The ref probably does not reuse BY.  */
893 /* Prune the prefetch candidate REF using the reuses with other references
897 prune_ref_by_reuse (struct mem_ref
*ref
, struct mem_ref
*refs
)
899 struct mem_ref
*prune_by
;
902 prune_ref_by_self_reuse (ref
);
904 for (prune_by
= refs
; prune_by
; prune_by
= prune_by
->next
)
912 if (!WRITE_CAN_USE_READ_PREFETCH
914 && !prune_by
->write_p
)
916 if (!READ_CAN_USE_WRITE_PREFETCH
918 && prune_by
->write_p
)
921 prune_ref_by_group_reuse (ref
, prune_by
, before
);
925 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
928 prune_group_by_reuse (struct mem_ref_group
*group
)
930 struct mem_ref
*ref_pruned
;
932 for (ref_pruned
= group
->refs
; ref_pruned
; ref_pruned
= ref_pruned
->next
)
934 prune_ref_by_reuse (ref_pruned
, group
->refs
);
936 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
938 fprintf (dump_file
, "Reference %p:", (void *) ref_pruned
);
940 if (ref_pruned
->prefetch_before
== PREFETCH_ALL
941 && ref_pruned
->prefetch_mod
== 1)
942 fprintf (dump_file
, " no restrictions");
943 else if (ref_pruned
->prefetch_before
== 0)
944 fprintf (dump_file
, " do not prefetch");
945 else if (ref_pruned
->prefetch_before
<= ref_pruned
->prefetch_mod
)
946 fprintf (dump_file
, " prefetch once");
949 if (ref_pruned
->prefetch_before
!= PREFETCH_ALL
)
951 fprintf (dump_file
, " prefetch before ");
952 fprintf (dump_file
, HOST_WIDE_INT_PRINT_DEC
,
953 ref_pruned
->prefetch_before
);
955 if (ref_pruned
->prefetch_mod
!= 1)
957 fprintf (dump_file
, " prefetch mod ");
958 fprintf (dump_file
, HOST_WIDE_INT_PRINT_DEC
,
959 ref_pruned
->prefetch_mod
);
962 fprintf (dump_file
, "\n");
967 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
970 prune_by_reuse (struct mem_ref_group
*groups
)
972 for (; groups
; groups
= groups
->next
)
973 prune_group_by_reuse (groups
);
976 /* Returns true if we should issue prefetch for REF. */
979 should_issue_prefetch_p (struct mem_ref
*ref
)
981 /* For now do not issue prefetches for only first few of the
983 if (ref
->prefetch_before
!= PREFETCH_ALL
)
985 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
986 fprintf (dump_file
, "Ignoring %p due to prefetch_before\n",
991 /* Do not prefetch nontemporal stores. */
994 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
995 fprintf (dump_file
, "Ignoring nontemporal store %p\n", (void *) ref
);
1002 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1003 AHEAD is the number of iterations to prefetch ahead (which corresponds
1004 to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
1006 unrolled. Returns true if there is anything to prefetch. */
1009 schedule_prefetches (struct mem_ref_group
*groups
, unsigned unroll_factor
,
1012 unsigned remaining_prefetch_slots
, n_prefetches
, prefetch_slots
;
1013 unsigned slots_per_prefetch
;
1014 struct mem_ref
*ref
;
1017 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
1018 remaining_prefetch_slots
= SIMULTANEOUS_PREFETCHES
;
1020 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1021 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
1022 it will need a prefetch slot. */
1023 slots_per_prefetch
= (ahead
+ unroll_factor
/ 2) / unroll_factor
;
1024 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1025 fprintf (dump_file
, "Each prefetch instruction takes %u prefetch slots.\n",
1026 slots_per_prefetch
);
1028 /* For now we just take memory references one by one and issue
1029 prefetches for as many as possible. The groups are sorted
1030 starting with the largest step, since the references with
1031 large step are more likely to cause many cache misses. */
1033 for (; groups
; groups
= groups
->next
)
1034 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1036 if (!should_issue_prefetch_p (ref
))
1039 /* The loop is far from being sufficiently unrolled for this
         prefetch.  Do not generate prefetch to avoid many redundant
1042 if (ref
->prefetch_mod
/ unroll_factor
> PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
)
1045 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1046 and we unroll the loop UNROLL_FACTOR times, we need to insert
1047 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1049 n_prefetches
= ((unroll_factor
+ ref
->prefetch_mod
- 1)
1050 / ref
->prefetch_mod
);
1051 prefetch_slots
= n_prefetches
* slots_per_prefetch
;
1053 /* If more than half of the prefetches would be lost anyway, do not
1054 issue the prefetch. */
1055 if (2 * remaining_prefetch_slots
< prefetch_slots
)
1058 ref
->issue_prefetch_p
= true;
1060 if (remaining_prefetch_slots
<= prefetch_slots
)
1062 remaining_prefetch_slots
-= prefetch_slots
;
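/* A sketch with made-up numbers: with AHEAD = 8 and UNROLL_FACTOR = 4, each
   prefetch instruction occupies (8 + 4/2) / 4 = 2 slots; a reference with
   prefetch_mod 4 then needs (4 + 4 - 1) / 4 = 1 prefetch per unrolled body
   and consumes 2 of the SIMULTANEOUS_PREFETCHES slots, while a reference
   with prefetch_mod 1 needs 4 prefetches and consumes 8 slots.  */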
1069 /* Return TRUE if no prefetch is going to be generated in the given
1073 nothing_to_prefetch_p (struct mem_ref_group
*groups
)
1075 struct mem_ref
*ref
;
1077 for (; groups
; groups
= groups
->next
)
1078 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1079 if (should_issue_prefetch_p (ref
))
1085 /* Estimate the number of prefetches in the given GROUPS.
1086 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1089 estimate_prefetch_count (struct mem_ref_group
*groups
, unsigned unroll_factor
)
1091 struct mem_ref
*ref
;
1092 unsigned n_prefetches
;
1093 int prefetch_count
= 0;
1095 for (; groups
; groups
= groups
->next
)
1096 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1097 if (should_issue_prefetch_p (ref
))
1099 n_prefetches
= ((unroll_factor
+ ref
->prefetch_mod
- 1)
1100 / ref
->prefetch_mod
);
1101 prefetch_count
+= n_prefetches
;
1104 return prefetch_count
;
1107 /* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
1109 is the factor by which LOOP was unrolled. */
1112 issue_prefetch_ref (struct mem_ref
*ref
, unsigned unroll_factor
, unsigned ahead
)
1114 HOST_WIDE_INT delta
;
1115 tree addr
, addr_base
, write_p
, local
, forward
;
1117 gimple_stmt_iterator bsi
;
1118 unsigned n_prefetches
, ap
;
1119 bool nontemporal
= ref
->reuse_distance
>= L2_CACHE_SIZE_BYTES
;
1121 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1122 fprintf (dump_file
, "Issued%s prefetch for %p.\n",
1123 nontemporal
? " nontemporal" : "",
1126 bsi
= gsi_for_stmt (ref
->stmt
);
1128 n_prefetches
= ((unroll_factor
+ ref
->prefetch_mod
- 1)
1129 / ref
->prefetch_mod
);
1130 addr_base
= build_fold_addr_expr_with_type (ref
->mem
, ptr_type_node
);
1131 addr_base
= force_gimple_operand_gsi (&bsi
, unshare_expr (addr_base
),
1132 true, NULL
, true, GSI_SAME_STMT
);
1133 write_p
= ref
->write_p
? integer_one_node
: integer_zero_node
;
1134 local
= nontemporal
? integer_zero_node
: integer_three_node
;
1136 for (ap
= 0; ap
< n_prefetches
; ap
++)
1138 if (cst_and_fits_in_hwi (ref
->group
->step
))
1140 /* Determine the address to prefetch. */
1141 delta
= (ahead
+ ap
* ref
->prefetch_mod
) *
1142 int_cst_value (ref
->group
->step
);
1143 addr
= fold_build_pointer_plus_hwi (addr_base
, delta
);
1144 addr
= force_gimple_operand_gsi (&bsi
, unshare_expr (addr
), true, NULL
,
1145 true, GSI_SAME_STMT
);
1149 /* The step size is non-constant but loop-invariant. We use the
             heuristic of simply prefetching AHEAD iterations ahead.  */
1151 forward
= fold_build2 (MULT_EXPR
, sizetype
,
1152 fold_convert (sizetype
, ref
->group
->step
),
1153 fold_convert (sizetype
, size_int (ahead
)));
1154 addr
= fold_build_pointer_plus (addr_base
, forward
);
1155 addr
= force_gimple_operand_gsi (&bsi
, unshare_expr (addr
), true,
1156 NULL
, true, GSI_SAME_STMT
);
1158 /* Create the prefetch instruction. */
1159 prefetch
= gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH
),
1160 3, addr
, write_p
, local
);
1161 gsi_insert_before (&bsi
, prefetch
, GSI_SAME_STMT
);
1165 /* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */
1170 issue_prefetches (struct mem_ref_group
*groups
,
1171 unsigned unroll_factor
, unsigned ahead
)
1173 struct mem_ref
*ref
;
1175 for (; groups
; groups
= groups
->next
)
1176 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1177 if (ref
->issue_prefetch_p
)
1178 issue_prefetch_ref (ref
, unroll_factor
, ahead
);
/* Returns true if REF is a memory write for which a nontemporal store insn
1185 nontemporal_store_p (struct mem_ref
*ref
)
1187 enum machine_mode mode
;
1188 enum insn_code code
;
  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
1192 be reordered with respect to other memory references. */
1194 || !ref
->independent_p
1195 || ref
->reuse_distance
< L2_CACHE_SIZE_BYTES
)
1198 /* Check that we have the storent instruction for the mode. */
1199 mode
= TYPE_MODE (TREE_TYPE (ref
->mem
));
1200 if (mode
== BLKmode
)
1203 code
= optab_handler (storent_optab
, mode
);
1204 return code
!= CODE_FOR_nothing
;
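/* As an illustration (OUT, IN and F are just placeholders), in a loop like

     for (i = 0; i < n; i++)
       out[i] = f (in[i]);

   where out[i] is never read again inside the loop nest and N is large
   enough that the written data cannot stay in the L2 cache, the store to
   out[i] satisfies the conditions above and may be turned into a nontemporal
   (streaming) store, provided the target defines a storent pattern for the
   mode of out[i].  */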
1207 /* If REF is a nontemporal store, we mark the corresponding modify statement
1208 and return true. Otherwise, we return false. */
1211 mark_nontemporal_store (struct mem_ref
*ref
)
1213 if (!nontemporal_store_p (ref
))
1216 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1217 fprintf (dump_file
, "Marked reference %p as a nontemporal store.\n",
1220 gimple_assign_set_nontemporal_move (ref
->stmt
, true);
1221 ref
->storent_p
= true;
1226 /* Issue a memory fence instruction after LOOP. */
1229 emit_mfence_after_loop (struct loop
*loop
)
1231 vec
<edge
> exits
= get_loop_exit_edges (loop
);
1234 gimple_stmt_iterator bsi
;
1237 FOR_EACH_VEC_ELT (exits
, i
, exit
)
1239 call
= gimple_build_call (FENCE_FOLLOWING_MOVNT
, 0);
1241 if (!single_pred_p (exit
->dest
)
1242 /* If possible, we prefer not to insert the fence on other paths
1244 && !(exit
->flags
& EDGE_ABNORMAL
))
1245 split_loop_exit_edge (exit
);
1246 bsi
= gsi_after_labels (exit
->dest
);
1248 gsi_insert_before (&bsi
, call
, GSI_NEW_STMT
);
1252 update_ssa (TODO_update_ssa_only_virtuals
);
1255 /* Returns true if we can use storent in loop, false otherwise. */
1258 may_use_storent_in_loop_p (struct loop
*loop
)
1262 if (loop
->inner
!= NULL
)
1265 /* If we must issue a mfence insn after using storent, check that there
1266 is a suitable place for it at each of the loop exits. */
1267 if (FENCE_FOLLOWING_MOVNT
!= NULL_TREE
)
1269 vec
<edge
> exits
= get_loop_exit_edges (loop
);
1273 FOR_EACH_VEC_ELT (exits
, i
, exit
)
1274 if ((exit
->flags
& EDGE_ABNORMAL
)
1275 && exit
->dest
== EXIT_BLOCK_PTR
)
1284 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1285 references in the loop. */
1288 mark_nontemporal_stores (struct loop
*loop
, struct mem_ref_group
*groups
)
1290 struct mem_ref
*ref
;
1293 if (!may_use_storent_in_loop_p (loop
))
1296 for (; groups
; groups
= groups
->next
)
1297 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1298 any
|= mark_nontemporal_store (ref
);
1300 if (any
&& FENCE_FOLLOWING_MOVNT
!= NULL_TREE
)
1301 emit_mfence_after_loop (loop
);
1304 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1305 this is the case, fill in DESC by the description of number of
1309 should_unroll_loop_p (struct loop
*loop
, struct tree_niter_desc
*desc
,
1312 if (!can_unroll_loop_p (loop
, factor
, desc
))
1315 /* We only consider loops without control flow for unrolling. This is not
1316 a hard restriction -- tree_unroll_loop works with arbitrary loops
1317 as well; but the unrolling/prefetching is usually more profitable for
1318 loops consisting of a single basic block, and we want to limit the
1320 if (loop
->num_nodes
> 2)
/* Determine the factor by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  The description of the
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
1329 insns of the LOOP. EST_NITER is the estimated number of iterations of
1330 the loop, or -1 if no estimate is available. */
1333 determine_unroll_factor (struct loop
*loop
, struct mem_ref_group
*refs
,
1334 unsigned ninsns
, struct tree_niter_desc
*desc
,
1335 HOST_WIDE_INT est_niter
)
1337 unsigned upper_bound
;
1338 unsigned nfactor
, factor
, mod_constraint
;
1339 struct mem_ref_group
*agp
;
1340 struct mem_ref
*ref
;
1342 /* First check whether the loop is not too large to unroll. We ignore
1343 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1344 from unrolling them enough to make exactly one cache line covered by each
1345 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1346 us from unrolling the loops too many times in cases where we only expect
1347 gains from better scheduling and decreasing loop overhead, which is not
1349 upper_bound
= PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS
) / ninsns
;
1351 /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would never be entered.
1353 if (est_niter
>= 0 && est_niter
< (HOST_WIDE_INT
) upper_bound
)
1354 upper_bound
= est_niter
;
1356 if (upper_bound
<= 1)
  /* Choose the factor so that we may prefetch each cache line just once,
1360 but bound the unrolling by UPPER_BOUND. */
1362 for (agp
= refs
; agp
; agp
= agp
->next
)
1363 for (ref
= agp
->refs
; ref
; ref
= ref
->next
)
1364 if (should_issue_prefetch_p (ref
))
1366 mod_constraint
= ref
->prefetch_mod
;
1367 nfactor
= least_common_multiple (mod_constraint
, factor
);
1368 if (nfactor
<= upper_bound
)
1372 if (!should_unroll_loop_p (loop
, desc
, factor
))
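/* A worked example with made-up numbers: if the loop has NINSNS = 10 and
   PARAM_MAX_UNROLLED_INSNS is 200, the upper bound on the factor is 20;
   with two prefetched references whose prefetch_mod values are 2 and 8, the
   least common multiple 8 stays below that bound, so (provided the estimated
   trip count does not impose a smaller bound and should_unroll_loop_p agrees)
   the loop is unrolled 8 times and each reference is prefetched once per
   cache line.  */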
1378 /* Returns the total volume of the memory references REFS, taking into account
1379 reuses in the innermost loop and cache line size. TODO -- we should also
1380 take into account reuses across the iterations of the loops in the loop
1384 volume_of_references (struct mem_ref_group
*refs
)
1386 unsigned volume
= 0;
1387 struct mem_ref_group
*gr
;
1388 struct mem_ref
*ref
;
1390 for (gr
= refs
; gr
; gr
= gr
->next
)
1391 for (ref
= gr
->refs
; ref
; ref
= ref
->next
)
1393 /* Almost always reuses another value? */
1394 if (ref
->prefetch_before
!= PREFETCH_ALL
)
1397 /* If several iterations access the same cache line, use the size of
1398 the line divided by this number. Otherwise, a cache line is
1399 accessed in each iteration. TODO -- in the latter case, we should
1400 take the size of the reference into account, rounding it up on cache
1401 line size multiple. */
1402 volume
+= L1_CACHE_LINE_SIZE
/ ref
->prefetch_mod
;
1407 /* Returns the volume of memory references accessed across VEC iterations of
1408 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1409 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1412 volume_of_dist_vector (lambda_vector vec
, unsigned *loop_sizes
, unsigned n
)
1416 for (i
= 0; i
< n
; i
++)
1423 gcc_assert (vec
[i
] > 0);
1425 /* We ignore the parts of the distance vector in subloops, since usually
1426 the numbers of iterations are much smaller. */
1427 return loop_sizes
[i
] * vec
[i
];
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */
1435 add_subscript_strides (tree access_fn
, unsigned stride
,
1436 HOST_WIDE_INT
*strides
, unsigned n
, struct loop
*loop
)
1440 HOST_WIDE_INT astep
;
1441 unsigned min_depth
= loop_depth (loop
) - n
;
1443 while (TREE_CODE (access_fn
) == POLYNOMIAL_CHREC
)
1445 aloop
= get_chrec_loop (access_fn
);
1446 step
= CHREC_RIGHT (access_fn
);
1447 access_fn
= CHREC_LEFT (access_fn
);
1449 if ((unsigned) loop_depth (aloop
) <= min_depth
)
1452 if (host_integerp (step
, 0))
1453 astep
= tree_low_cst (step
, 0);
1455 astep
= L1_CACHE_LINE_SIZE
;
1457 strides
[n
- 1 - loop_depth (loop
) + loop_depth (aloop
)] += astep
* stride
;
1462 /* Returns the volume of memory references accessed between two consecutive
1463 self-reuses of the reference DR. We consider the subscripts of DR in N
1464 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1465 loops. LOOP is the innermost loop of the current loop nest. */
1468 self_reuse_distance (data_reference_p dr
, unsigned *loop_sizes
, unsigned n
,
1471 tree stride
, access_fn
;
1472 HOST_WIDE_INT
*strides
, astride
;
1473 vec
<tree
> access_fns
;
1474 tree ref
= DR_REF (dr
);
1475 unsigned i
, ret
= ~0u;
1477 /* In the following example:
1479 for (i = 0; i < N; i++)
1480 for (j = 0; j < N; j++)
1482 the same cache line is accessed each N steps (except if the change from
1483 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1484 we cannot rely purely on the results of the data dependence analysis.
1486 Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */
1489 strides
= XCNEWVEC (HOST_WIDE_INT
, n
);
1490 access_fns
= DR_ACCESS_FNS (dr
);
1492 FOR_EACH_VEC_ELT (access_fns
, i
, access_fn
)
1494 /* Keep track of the reference corresponding to the subscript, so that we
1496 while (handled_component_p (ref
) && TREE_CODE (ref
) != ARRAY_REF
)
1497 ref
= TREE_OPERAND (ref
, 0);
1499 if (TREE_CODE (ref
) == ARRAY_REF
)
1501 stride
= TYPE_SIZE_UNIT (TREE_TYPE (ref
));
1502 if (host_integerp (stride
, 1))
1503 astride
= tree_low_cst (stride
, 1);
1505 astride
= L1_CACHE_LINE_SIZE
;
1507 ref
= TREE_OPERAND (ref
, 0);
1512 add_subscript_strides (access_fn
, astride
, strides
, n
, loop
);
1515 for (i
= n
; i
-- > 0; )
1517 unsigned HOST_WIDE_INT s
;
1519 s
= strides
[i
] < 0 ? -strides
[i
] : strides
[i
];
1521 if (s
< (unsigned) L1_CACHE_LINE_SIZE
1523 > (unsigned) (L1_CACHE_SIZE_BYTES
/ NONTEMPORAL_FRACTION
)))
1525 ret
= loop_sizes
[i
];
1534 /* Determines the distance till the first reuse of each reference in REFS
1535 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1536 memory references in the loop. Return false if the analysis fails. */
1539 determine_loop_nest_reuse (struct loop
*loop
, struct mem_ref_group
*refs
,
1542 struct loop
*nest
, *aloop
;
1543 vec
<data_reference_p
> datarefs
= vNULL
;
1544 vec
<ddr_p
> dependences
= vNULL
;
1545 struct mem_ref_group
*gr
;
1546 struct mem_ref
*ref
, *refb
;
1547 vec
<loop_p
> vloops
= vNULL
;
1548 unsigned *loop_data_size
;
1550 unsigned volume
, dist
, adist
;
1552 data_reference_p dr
;
1558 /* Find the outermost loop of the loop nest of loop (we require that
1559 there are no sibling loops inside the nest). */
1563 aloop
= loop_outer (nest
);
1565 if (aloop
== current_loops
->tree_root
1566 || aloop
->inner
->next
)
1572 /* For each loop, determine the amount of data accessed in each iteration.
1573 We use this to estimate whether the reference is evicted from the
1574 cache before its reuse. */
1575 find_loop_nest (nest
, &vloops
);
1576 n
= vloops
.length ();
1577 loop_data_size
= XNEWVEC (unsigned, n
);
1578 volume
= volume_of_references (refs
);
1582 loop_data_size
[i
] = volume
;
1583 /* Bound the volume by the L2 cache size, since above this bound,
1584 all dependence distances are equivalent. */
1585 if (volume
> L2_CACHE_SIZE_BYTES
)
1589 vol
= estimated_stmt_executions_int (aloop
);
1591 vol
= expected_loop_iterations (aloop
);
1595 /* Prepare the references in the form suitable for data dependence
1596 analysis. We ignore unanalyzable data references (the results
1597 are used just as a heuristics to estimate temporality of the
1598 references, hence we do not need to worry about correctness). */
1599 for (gr
= refs
; gr
; gr
= gr
->next
)
1600 for (ref
= gr
->refs
; ref
; ref
= ref
->next
)
1602 dr
= create_data_ref (nest
, loop_containing_stmt (ref
->stmt
),
1603 ref
->mem
, ref
->stmt
, !ref
->write_p
);
1607 ref
->reuse_distance
= volume
;
1609 datarefs
.safe_push (dr
);
1612 no_other_refs
= false;
1615 FOR_EACH_VEC_ELT (datarefs
, i
, dr
)
1617 dist
= self_reuse_distance (dr
, loop_data_size
, n
, loop
);
1618 ref
= (struct mem_ref
*) dr
->aux
;
1619 if (ref
->reuse_distance
> dist
)
1620 ref
->reuse_distance
= dist
;
1623 ref
->independent_p
= true;
1626 if (!compute_all_dependences (datarefs
, &dependences
, vloops
, true))
1629 FOR_EACH_VEC_ELT (dependences
, i
, dep
)
1631 if (DDR_ARE_DEPENDENT (dep
) == chrec_known
)
1634 ref
= (struct mem_ref
*) DDR_A (dep
)->aux
;
1635 refb
= (struct mem_ref
*) DDR_B (dep
)->aux
;
1637 if (DDR_ARE_DEPENDENT (dep
) == chrec_dont_know
1638 || DDR_NUM_DIST_VECTS (dep
) == 0)
1640 /* If the dependence cannot be analyzed, assume that there might be
1644 ref
->independent_p
= false;
1645 refb
->independent_p
= false;
1649 /* The distance vectors are normalized to be always lexicographically
1650 positive, hence we cannot tell just from them whether DDR_A comes
1651 before DDR_B or vice versa. However, it is not important,
1652 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1653 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1654 in cache (and marking it as nontemporal would not affect
1658 for (j
= 0; j
< DDR_NUM_DIST_VECTS (dep
); j
++)
1660 adist
= volume_of_dist_vector (DDR_DIST_VECT (dep
, j
),
1663 /* If this is a dependence in the innermost loop (i.e., the
1664 distances in all superloops are zero) and it is not
1665 the trivial self-dependence with distance zero, record that
1666 the references are not completely independent. */
1667 if (lambda_vector_zerop (DDR_DIST_VECT (dep
, j
), n
- 1)
1669 || DDR_DIST_VECT (dep
, j
)[n
-1] != 0))
1671 ref
->independent_p
= false;
1672 refb
->independent_p
= false;
1675 /* Ignore accesses closer than
1676 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1677 so that we use nontemporal prefetches e.g. if single memory
1678 location is accessed several times in a single iteration of
1680 if (adist
< L1_CACHE_SIZE_BYTES
/ NONTEMPORAL_FRACTION
)
1688 if (ref
->reuse_distance
> dist
)
1689 ref
->reuse_distance
= dist
;
1690 if (refb
->reuse_distance
> dist
)
1691 refb
->reuse_distance
= dist
;
1694 free_dependence_relations (dependences
);
1695 free_data_refs (datarefs
);
1696 free (loop_data_size
);
1698 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1700 fprintf (dump_file
, "Reuse distances:\n");
1701 for (gr
= refs
; gr
; gr
= gr
->next
)
1702 for (ref
= gr
->refs
; ref
; ref
= ref
->next
)
1703 fprintf (dump_file
, " ref %p distance %u\n",
1704 (void *) ref
, ref
->reuse_distance
);
1710 /* Determine whether or not the trip count to ahead ratio is too small based
   on profitability considerations.
1712 AHEAD: the iteration ahead distance,
1713 EST_NITER: the estimated trip count. */
1716 trip_count_to_ahead_ratio_too_small_p (unsigned ahead
, HOST_WIDE_INT est_niter
)
1718 /* Assume trip count to ahead ratio is big enough if the trip count could not
1719 be estimated at compile time. */
1723 if (est_niter
< (HOST_WIDE_INT
) (TRIP_COUNT_TO_AHEAD_RATIO
* ahead
))
1725 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1727 "Not prefetching -- loop estimated to roll only %d times\n",
1735 /* Determine whether or not the number of memory references in the loop is
   reasonable based on the profitability and compilation time considerations.
1737 NINSNS: estimated number of instructions in the loop,
1738 MEM_REF_COUNT: total number of memory references in the loop. */
1741 mem_ref_count_reasonable_p (unsigned ninsns
, unsigned mem_ref_count
)
1743 int insn_to_mem_ratio
;
1745 if (mem_ref_count
== 0)
1748 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1749 (compute_all_dependences) have high costs based on quadratic complexity.
1750 To avoid huge compilation time, we give up prefetching if mem_ref_count
1752 if (mem_ref_count
> PREFETCH_MAX_MEM_REFS_PER_LOOP
)
1755 /* Prefetching improves performance by overlapping cache missing
1756 memory accesses with CPU operations. If the loop does not have
1757 enough CPU operations to overlap with memory operations, prefetching
1758 won't give a significant benefit. One approximate way of checking
1759 this is to require the ratio of instructions to memory references to
1760 be above a certain limit. This approximation works well in practice.
1761 TODO: Implement a more precise computation by estimating the time
1762 for each CPU or memory op in the loop. Time estimates for memory ops
1763 should account for cache misses. */
1764 insn_to_mem_ratio
= ninsns
/ mem_ref_count
;
1766 if (insn_to_mem_ratio
< PREFETCH_MIN_INSN_TO_MEM_RATIO
)
1768 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1770 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1778 /* Determine whether or not the instruction to prefetch ratio in the loop is
   too small based on profitability considerations.
1780 NINSNS: estimated number of instructions in the loop,
1781 PREFETCH_COUNT: an estimate of the number of prefetches,
1782 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1785 insn_to_prefetch_ratio_too_small_p (unsigned ninsns
, unsigned prefetch_count
,
1786 unsigned unroll_factor
)
1788 int insn_to_prefetch_ratio
;
1790 /* Prefetching most likely causes performance degradation when the instruction
1791 to prefetch ratio is too small. Too many prefetch instructions in a loop
1792 may reduce the I-cache performance.
1793 (unroll_factor * ninsns) is used to estimate the number of instructions in
1794 the unrolled loop. This implementation is a bit simplistic -- the number
1795 of issued prefetch instructions is also affected by unrolling. So,
1796 prefetch_mod and the unroll factor should be taken into account when
1797 determining prefetch_count. Also, the number of insns of the unrolled
1798 loop will usually be significantly smaller than the number of insns of the
1799 original loop * unroll_factor (at least the induction variable increases
1800 and the exit branches will get eliminated), so it might be better to use
1801 tree_estimate_loop_size + estimated_unrolled_size. */
1802 insn_to_prefetch_ratio
= (unroll_factor
* ninsns
) / prefetch_count
;
1803 if (insn_to_prefetch_ratio
< MIN_INSN_TO_PREFETCH_RATIO
)
1805 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1807 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1808 insn_to_prefetch_ratio
);
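/* For instance (made-up numbers): a loop body of 8 insns unrolled 4 times
   with 4 prefetch instructions gives a ratio of (4 * 8) / 4 = 8; if
   MIN_INSN_TO_PREFETCH_RATIO were 9, prefetching would be rejected as likely
   to cost more in the I-cache than the prefetches gain.  */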
1816 /* Issue prefetch instructions for array references in LOOP. Returns
1817 true if the LOOP was unrolled. */
1820 loop_prefetch_arrays (struct loop
*loop
)
1822 struct mem_ref_group
*refs
;
1823 unsigned ahead
, ninsns
, time
, unroll_factor
;
1824 HOST_WIDE_INT est_niter
;
1825 struct tree_niter_desc desc
;
1826 bool unrolled
= false, no_other_refs
;
1827 unsigned prefetch_count
;
1828 unsigned mem_ref_count
;
1830 if (optimize_loop_nest_for_size_p (loop
))
1832 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1833 fprintf (dump_file
, " ignored (cold area)\n");
1837 /* FIXME: the time should be weighted by the probabilities of the blocks in
1839 time
= tree_num_loop_insns (loop
, &eni_time_weights
);
1843 ahead
= (PREFETCH_LATENCY
+ time
- 1) / time
;
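  /* For example (illustrative values only), with a PREFETCH_LATENCY of 200
     and a loop body whose estimated time is 25, the data used by iteration
     i + 8 is prefetched while iteration i executes.  */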
1844 est_niter
= estimated_stmt_executions_int (loop
);
1845 if (est_niter
== -1)
1846 est_niter
= max_stmt_executions_int (loop
);
1848 /* Prefetching is not likely to be profitable if the trip count to ahead
1849 ratio is too small. */
1850 if (trip_count_to_ahead_ratio_too_small_p (ahead
, est_niter
))
1853 ninsns
= tree_num_loop_insns (loop
, &eni_size_weights
);
1855 /* Step 1: gather the memory references. */
1856 refs
= gather_memory_references (loop
, &no_other_refs
, &mem_ref_count
);
1858 /* Give up prefetching if the number of memory references in the
     loop is not reasonable based on profitability and compilation time
1861 if (!mem_ref_count_reasonable_p (ninsns
, mem_ref_count
))
1864 /* Step 2: estimate the reuse effects. */
1865 prune_by_reuse (refs
);
1867 if (nothing_to_prefetch_p (refs
))
1870 if (!determine_loop_nest_reuse (loop
, refs
, no_other_refs
))
1873 /* Step 3: determine unroll factor. */
1874 unroll_factor
= determine_unroll_factor (loop
, refs
, ninsns
, &desc
,
1877 /* Estimate prefetch count for the unrolled loop. */
1878 prefetch_count
= estimate_prefetch_count (refs
, unroll_factor
);
1879 if (prefetch_count
== 0)
1882 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1883 fprintf (dump_file
, "Ahead %d, unroll factor %d, trip count "
1884 HOST_WIDE_INT_PRINT_DEC
"\n"
1885 "insn count %d, mem ref count %d, prefetch count %d\n",
1886 ahead
, unroll_factor
, est_niter
,
1887 ninsns
, mem_ref_count
, prefetch_count
);
1889 /* Prefetching is not likely to be profitable if the instruction to prefetch
1890 ratio is too small. */
1891 if (insn_to_prefetch_ratio_too_small_p (ninsns
, prefetch_count
,
1895 mark_nontemporal_stores (loop
, refs
);
1897 /* Step 4: what to prefetch? */
1898 if (!schedule_prefetches (refs
, unroll_factor
, ahead
))
1901 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1902 iterations so that we do not issue superfluous prefetches. */
1903 if (unroll_factor
!= 1)
1905 tree_unroll_loop (loop
, unroll_factor
,
1906 single_dom_exit (loop
), &desc
);
1910 /* Step 6: issue the prefetches. */
1911 issue_prefetches (refs
, unroll_factor
, ahead
);
1914 release_mem_refs (refs
);
1918 /* Issue prefetch instructions for array references in loops. */
1921 tree_ssa_prefetch_arrays (void)
1925 bool unrolled
= false;
1929 /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
     -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1931 of processor costs and i486 does not have prefetch, but
1932 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1933 || PREFETCH_BLOCK
== 0)
1936 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1938 fprintf (dump_file
, "Prefetching parameters:\n");
1939 fprintf (dump_file
, " simultaneous prefetches: %d\n",
1940 SIMULTANEOUS_PREFETCHES
);
1941 fprintf (dump_file
, " prefetch latency: %d\n", PREFETCH_LATENCY
);
1942 fprintf (dump_file
, " prefetch block size: %d\n", PREFETCH_BLOCK
);
1943 fprintf (dump_file
, " L1 cache size: %d lines, %d kB\n",
1944 L1_CACHE_SIZE_BYTES
/ L1_CACHE_LINE_SIZE
, L1_CACHE_SIZE
);
1945 fprintf (dump_file
, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE
);
1946 fprintf (dump_file
, " L2 cache size: %d kB\n", L2_CACHE_SIZE
);
1947 fprintf (dump_file
, " min insn-to-prefetch ratio: %d \n",
1948 MIN_INSN_TO_PREFETCH_RATIO
);
1949 fprintf (dump_file
, " min insn-to-mem ratio: %d \n",
1950 PREFETCH_MIN_INSN_TO_MEM_RATIO
);
1951 fprintf (dump_file
, "\n");
1954 initialize_original_copy_tables ();
1956 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH
))
1958 tree type
= build_function_type_list (void_type_node
,
1959 const_ptr_type_node
, NULL_TREE
);
1960 tree decl
= add_builtin_function ("__builtin_prefetch", type
,
1961 BUILT_IN_PREFETCH
, BUILT_IN_NORMAL
,
1963 DECL_IS_NOVOPS (decl
) = true;
1964 set_builtin_decl (BUILT_IN_PREFETCH
, decl
, false);
  /* We assume that the cache line size is a power of two, so verify this
1969 gcc_assert ((PREFETCH_BLOCK
& (PREFETCH_BLOCK
- 1)) == 0);
1971 FOR_EACH_LOOP (li
, loop
, LI_FROM_INNERMOST
)
1973 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1974 fprintf (dump_file
, "Processing loop %d:\n", loop
->num
);
1976 unrolled
|= loop_prefetch_arrays (loop
);
1978 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1979 fprintf (dump_file
, "\n\n");
1985 todo_flags
|= TODO_cleanup_cfg
;
1988 free_original_copy_tables ();