/* Array prefetching.
   Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic.h"
#include "tree-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "toplev.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"
#include "optabs.h"
/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.

   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[0] = ...;                   (0)
          a[i] = ...;                   (1)
          a[i + 64] = ...;              (2)
          a[16 * i] = ...;              (3)
          a[187 * i] = ...;             (4)
          a[187 * i + 50] = ...;        (5)
        }

       (0) obviously has PREFETCH_BEFORE 1
       (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
           location 64 iterations before it, and PREFETCH_MOD 64 (since
           it hits the same cache line otherwise).
       (2) has PREFETCH_MOD 64
       (3) has PREFETCH_MOD 4
       (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
           the cache line accessed by (4) is the same as the one accessed
           by (5) with probability only 7/32 (the two addresses differ by
           50 bytes, so only 14 of the 64 possible alignments leave them
           in one 64-byte line).
       (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   The function is_loop_prefetching_profitable() implements a cost model
   to determine if prefetching is profitable for a given loop.  The cost
   model has two heuristics:
   1. A heuristic that determines whether the given loop has enough CPU
      ops that can be overlapped with cache missing memory ops.
      If not, the loop won't benefit from prefetching.  This is implemented
      by requiring the ratio between the instruction count and the mem ref
      count to be above a certain minimum.
   2. A heuristic that disables prefetching in a loop with an unknown trip
      count if the prefetching cost is above a certain limit.  The relative
      prefetching cost is estimated by taking the ratio between the
      prefetch count and the total instruction count (this models the
      I-cache cost).
   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   TODO -- ideas that are worth trying to implement:

   -- write and use more general reuse analysis (that could be also used
      in other cache aimed loop optimizations)
   -- make it behave sanely together with the prefetches given by user
      (now we just ignore them; at the very least we should avoid
      optimizing loops in which the user put his own prefetches)
   -- we assume cache line size alignment of arrays; this could be
      improved.  */
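/* For illustration (not part of the pass): with the heuristic from 3),
   if the target parameter PREFETCH_LATENCY were 200 time units and one
   loop iteration were estimated at 20 units, each prefetch would be
   issued ceil (200 / 20) = 10 iterations ahead of the access; see the
   computation of AHEAD in loop_prefetch_arrays below.  The numbers here
   are made up for the example -- the real values come from the target
   description and from tree_num_loop_insns.  */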
/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have a forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have a backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if single memory location
   is accessed several times in a single iteration of the loop.  */

#define NONTEMPORAL_FRACTION 16

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif
/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL            (~(unsigned HOST_WIDE_INT) 0)
/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, if prefetch_mod is 16 and unroll_factor is
   1, this means prefetching requires unrolling the loop 16 times, but
   the loop is not going to be unrolled.  In this case (ratio = 16),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 8
#endif
/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;        /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};
/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, "  group %p (base ", (void *) ref->group);
  print_generic_expr (file, ref->group->base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (ref->group->step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
  else
    print_generic_expr (file, ref->group->step, TDF_TREE);
  fprintf (file, ")\n");

  fprintf (file, "  delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
  fprintf (file, "\n");

  fprintf (file, "  %s\n", ref->write_p ? "write" : "read");

  fprintf (file, "\n");
}
/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
          && operand_equal_p ((*groups)->base, base, 0))
        return *groups;

      /* If step is an integer constant, keep the list of groups sorted
         by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
          && int_cst_value ((*groups)->step) < int_cst_value (step))
        break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}
/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
            HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* It does not have to be possible for write reference to reuse the read
         prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
          && write_p
          && !(*aref)->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !write_p
          && (*aref)->write_p)
        continue;

      if ((*aref)->delta == delta)
        return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}
/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
        {
          next_r = ref->next;
          free (ref);
        }
      free (groups);
    }
}
/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  gimple stmt;                  /* Statement of the reference.  */
  tree *step;                   /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};
/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
  affine_iv iv;

  if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
      || TREE_CODE (base) == ALIGN_INDIRECT_REF)
    return false;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
                  *index, &iv, true))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
        return false;
      imult = int_cst_value (stepsize);
      step = fold_build2 (MULT_EXPR, sizetype,
                          fold_convert (sizetype, step),
                          fold_convert (sizetype, stepsize));
      idelta *= imult;
    }

  if (*ar_data->step == NULL_TREE)
    *ar_data->step = step;
  else
    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
                                  fold_convert (sizetype, *ar_data->step),
                                  fold_convert (sizetype, step));
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}
/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
             tree *step, HOST_WIDE_INT *delta,
             gimple stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = NULL_TREE;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.  */
  if (TREE_CODE (ref) == COMPONENT_REF
      && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
    ref = TREE_OPERAND (ref, 0);

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
                              tree ref, bool write_p, gimple stmt)
{
  tree base, step;
  HOST_WIDE_INT delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;
  /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
  if (step == NULL_TREE)
    return false;

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}
/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs,
                          unsigned *ref_count)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;
  *ref_count = 0;

  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
        continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          stmt = gsi_stmt (bsi);

          if (gimple_code (stmt) != GIMPLE_ASSIGN)
            {
              if (gimple_vuse (stmt)
                  || (is_gimple_call (stmt)
                      && !(gimple_call_flags (stmt) & ECF_CONST)))
                *no_other_refs = false;
              continue;
            }

          lhs = gimple_assign_lhs (stmt);
          rhs = gimple_assign_rhs1 (stmt);

          if (REFERENCE_CLASS_P (rhs))
            {
              *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                              rhs, false, stmt);
              *ref_count += 1;
            }
          if (REFERENCE_CLASS_P (lhs))
            {
              *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                              lhs, true, stmt);
              *ref_count += 1;
            }
        }
    }
  free (body);

  return refs;
}
/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step;
  bool backward;

  /* If the step size is non constant, we cannot calculate prefetch_mod.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}
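/* Example (illustrative only): with PREFETCH_BLOCK == 64, a reference
   with step 1 gets prefetch_mod = 64 / 1 = 64 (one prefetch covers 64
   consecutive iterations), and a reference with step 16 gets
   prefetch_mod = 64 / 16 = 4, matching cases (2) and (3) in the overview
   comment at the top of this file.  */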
/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
}
/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  DISTINCT_ITERS is the number of
   distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */

static int
compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
                   HOST_WIDE_INT step, HOST_WIDE_INT delta,
                   unsigned HOST_WIDE_INT distinct_iters,
                   int align_unit)
{
  unsigned align, iter;
  int total_positions, miss_positions, miss_rate;
  int address1, address2, cache_line1, cache_line2;

  total_positions = 0;
  miss_positions = 0;

  /* Iterate through all possible alignments of the first
     memory reference within its cache line.  */
  for (align = 0; align < cache_line_size; align += align_unit)

    /* Iterate through all distinct iterations.  */
    for (iter = 0; iter < distinct_iters; iter++)
      {
        address1 = align + step * iter;
        address2 = address1 + delta;
        cache_line1 = address1 / cache_line_size;
        cache_line2 = address2 / cache_line_size;
        total_positions += 1;
        if (cache_line1 != cache_line2)
          miss_positions += 1;
      }
  miss_rate = 1000 * miss_positions / total_positions;
  return miss_rate;
}
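/* A worked example (illustrative, not used by the pass):
   compute_miss_rate (64, 96, 48, 2, 4) models two references whose
   addresses differ by 48 bytes with a common step of 96 bytes.  The
   pattern repeats after 2 distinct iterations (96 / 64 in lowest terms
   is 3 / 2).  Enumerating the 16 alignments (0, 4, ..., 60) over the 2
   iterations gives 32 positions, of which 24 place the two addresses in
   different cache lines, so the function returns 1000 * 24 / 32 = 750 --
   far above ACCEPTABLE_MISS_RATE, hence both references would be
   prefetched.  */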
/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
                          bool by_is_before)
{
  HOST_WIDE_INT step;
  bool backward;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
  int miss_rate;
  HOST_WIDE_INT reduced_step;
  unsigned HOST_WIDE_INT reduced_prefetch_block;
  tree ref_type;
  int align_unit;

  /* If the step is non constant we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
         former.  */
      if (by_is_before)
        ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
         same cache line, prefetch just the first one.  */
      if (!by_is_before)
        return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
          != ddown (by->delta, PREFETCH_BLOCK))
        return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
        return;

      /* Transform the data so that we may assume that the accesses
         are forward.  */
      delta = -delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
        return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which it occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
        prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
  prefetch_block = PREFETCH_BLOCK;
  reduced_prefetch_block = prefetch_block;
  reduced_step = step;
  while ((reduced_step & 1) == 0
         && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  prefetch_before = delta / step;
  delta %= step;
  ref_type = TREE_TYPE (ref->mem);
  align_unit = TYPE_ALIGN (ref_type) / 8;
  miss_rate = compute_miss_rate (prefetch_block, step, delta,
                                 reduced_prefetch_block, align_unit);
  if (miss_rate <= ACCEPTABLE_MISS_RATE)
    {
      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
        prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  miss_rate = compute_miss_rate (prefetch_block, step, delta,
                                 reduced_prefetch_block, align_unit);
  if (miss_rate <= ACCEPTABLE_MISS_RATE)
    {
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* The ref probably does not reuse by.  */
  return;
}
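/* Example (illustrative): for references (1) a[i] and (2) a[i + 64] from
   the overview comment, step = 1 and delta = 64, so the step <=
   PREFETCH_BLOCK branch applies: hit_from = ddown (64, 64) * 64 = 64 and
   prefetch_before = (64 - 0 + 1 - 1) / 1 = 64, i.e. (1) is only worth
   prefetching in the first 64 iterations; afterwards it always hits the
   line already fetched for (2).  */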
/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
        {
          before = false;
          continue;
        }

      if (!WRITE_CAN_USE_READ_PREFETCH
          && ref->write_p
          && !prune_by->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !ref->write_p
          && prune_by->write_p)
        continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}
/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Reference %p:", (void *) ref_pruned);

          if (ref_pruned->prefetch_before == PREFETCH_ALL
              && ref_pruned->prefetch_mod == 1)
            fprintf (dump_file, " no restrictions");
          else if (ref_pruned->prefetch_before == 0)
            fprintf (dump_file, " do not prefetch");
          else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
            fprintf (dump_file, " prefetch once");
          else
            {
              if (ref_pruned->prefetch_before != PREFETCH_ALL)
                {
                  fprintf (dump_file, " prefetch before ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_before);
                }
              if (ref_pruned->prefetch_mod != 1)
                {
                  fprintf (dump_file, " prefetch mod ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_mod);
                }
            }
          fprintf (dump_file, "\n");
        }
    }
}
/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}
/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* For now do not issue prefetches for only first few of the
     iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
                 (void *) ref);
      return false;
    }

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
      return false;
    }

  return true;
}
/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
                     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
             slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
        if (!should_issue_prefetch_p (ref))
          continue;

        /* The loop is far from being sufficiently unrolled for this
           prefetch.  Do not generate prefetch to avoid many redundant
           prefetches.  */
        if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
          continue;

        /* If we need to prefetch the reference each PREFETCH_MOD iterations,
           and we unroll the loop UNROLL_FACTOR times, we need to insert
           ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
           iteration.  */
        n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                        / ref->prefetch_mod);
        prefetch_slots = n_prefetches * slots_per_prefetch;

        /* If more than half of the prefetches would be lost anyway, do not
           issue the prefetch.  */
        if (2 * remaining_prefetch_slots < prefetch_slots)
          continue;

        ref->issue_prefetch_p = true;

        if (remaining_prefetch_slots <= prefetch_slots)
          return true;
        remaining_prefetch_slots -= prefetch_slots;
        any = true;
      }

  return any;
}
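/* Example (illustrative, with made-up parameter values): if AHEAD is 10
   and the loop is unrolled 4 times, each prefetch occupies
   (10 + 4 / 2) / 4 = 3 slots.  With SIMULTANEOUS_PREFETCHES == 6, a
   reference with prefetch_mod == 2 needs ceil (4 / 2) = 2 prefetch
   instructions, i.e. 6 slots; since 2 * 6 >= 6, it is scheduled and
   exhausts the remaining budget.  */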
/* Estimate the number of prefetches in the given GROUPS.  */

static int
estimate_prefetch_count (struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  int prefetch_count = 0;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        prefetch_count++;

  return prefetch_count;
}
/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local, forward;
  gimple prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
             nontemporal ? " nontemporal" : "",
             (void *) ref);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
                                        true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);

  for (ap = 0; ap < n_prefetches; ap++)
    {
      if (cst_and_fits_in_hwi (ref->group->step))
        {
          /* Determine the address to prefetch.  */
          delta = (ahead + ap * ref->prefetch_mod) *
                  int_cst_value (ref->group->step);
          addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
                              addr_base, size_int (delta));
          addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
                                           true, GSI_SAME_STMT);
        }
      else
        {
          /* The step size is non-constant but loop-invariant.  We use the
             heuristic of simply prefetching AHEAD iterations ahead.  */
          forward = fold_build2 (MULT_EXPR, sizetype,
                                 fold_convert (sizetype, ref->group->step),
                                 fold_convert (sizetype, size_int (ahead)));
          addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
                              forward);
          addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
                                           NULL, true, GSI_SAME_STMT);
        }
      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
                                    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}
/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
                  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
        issue_prefetch_ref (ref, unroll_factor, ahead);
}
/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  enum machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     on all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = optab_handler (storent_optab, mode)->insn_code;
  return code != CODE_FOR_nothing;
}
/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
             (void *) ref);

  gimple_assign_set_nontemporal_move (ref->stmt, true);
  ref->storent_p = true;

  return true;
}
/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  edge exit;
  gimple call;
  gimple_stmt_iterator bsi;
  unsigned i;

  for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
    {
      call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);

      if (!single_pred_p (exit->dest)
          /* If possible, we prefer not to insert the fence on other paths
             in cfg.  */
          && !(exit->flags & EDGE_ABNORMAL))
        split_loop_exit_edge (exit);
      bsi = gsi_after_labels (exit->dest);

      gsi_insert_before (&bsi, call, GSI_NEW_STMT);
      mark_virtual_ops_for_renaming (call);
    }

  VEC_free (edge, heap, exits);
  update_ssa (TODO_update_ssa_only_virtuals);
}
/* Returns true if we can use storent in loop, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue a mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      VEC (edge, heap) *exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
        if ((exit->flags & EDGE_ABNORMAL)
            && exit->dest == EXIT_BLOCK_PTR)
          ret = false;

      VEC_free (edge, heap, exits);
    }

  return ret;
}
/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
                      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}
/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
                         unsigned ninsns, struct tree_niter_desc *desc,
                         HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would be never entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        {
          mod_constraint = ref->prefetch_mod;
          nfactor = least_common_multiple (mod_constraint, factor);
          if (nfactor <= upper_bound)
            factor = nfactor;
        }

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}
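/* Example (illustrative): if the references to be prefetched have
   prefetch_mod values 4 and 6, the factor grows as
   least_common_multiple (4, 1) = 4, then least_common_multiple (6, 4)
   = 12, provided 12 does not exceed UPPER_BOUND; each cache line is then
   prefetched exactly once per unrolled loop body for both references.  */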
/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */

static unsigned
volume_of_references (struct mem_ref_group *refs)
{
  unsigned volume = 0;
  struct mem_ref_group *gr;
  struct mem_ref *ref;

  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
        /* Almost always reuses another value?  */
        if (ref->prefetch_before != PREFETCH_ALL)
          continue;

        /* If several iterations access the same cache line, use the size of
           the line divided by this number.  Otherwise, a cache line is
           accessed in each iteration.  TODO -- in the latter case, we should
           take the size of the reference into account, rounding it up on cache
           line size multiple.  */
        volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
      }
  return volume;
}
/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */

static unsigned
volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
{
  unsigned i;

  for (i = 0; i < n; i++)
    if (vec[i] != 0)
      break;

  if (i == n)
    return 0;

  gcc_assert (vec[i] > 0);

  /* We ignore the parts of the distance vector in subloops, since usually
     the numbers of iterations are much smaller.  */
  return loop_sizes[i] * vec[i];
}
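/* Example (illustrative): for a two-loop nest with loop_sizes = {4096,
   64} (bytes accessed in one iteration of the outer and inner loop,
   respectively), the distance vector (0, 2) yields 64 * 2 = 128 bytes
   between the dependent accesses, while (1, 0) yields 4096 * 1 = 4096
   bytes -- the first nonzero distance component determines the
   dominating volume.  */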
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */

static void
add_subscript_strides (tree access_fn, unsigned stride,
                       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
{
  struct loop *aloop;
  tree step;
  HOST_WIDE_INT astep;
  unsigned min_depth = loop_depth (loop) - n;

  while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
    {
      aloop = get_chrec_loop (access_fn);
      step = CHREC_RIGHT (access_fn);
      access_fn = CHREC_LEFT (access_fn);

      if ((unsigned) loop_depth (aloop) <= min_depth)
        continue;

      if (host_integerp (step, 0))
        astep = tree_low_cst (step, 0);
      else
        astep = L1_CACHE_LINE_SIZE;

      strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
    }
}
/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */

static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
                     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  VEC (tree, heap) *access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
         use (a[j][i]);
     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
         know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
        ref = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == ARRAY_REF)
        {
          stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
          if (host_integerp (stride, 1))
            astride = tree_low_cst (stride, 1);
          else
            astride = L1_CACHE_LINE_SIZE;

          ref = TREE_OPERAND (ref, 0);
        }
      else
        astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ? -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
          && (loop_sizes[i]
              > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
        {
          ret = loop_sizes[i];
          break;
        }
    }

  free (strides);
  return ret;
}
/* Determines the distance till the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  */

static void
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
                           bool no_other_refs)
{
  struct loop *nest, *aloop;
  VEC (data_reference_p, heap) *datarefs = NULL;
  VEC (ddr_p, heap) *dependences = NULL;
  struct mem_ref_group *gr;
  struct mem_ref *ref, *refb;
  VEC (loop_p, heap) *vloops = NULL;
  unsigned *loop_data_size;
  unsigned i, j, n;
  unsigned volume, dist, adist;
  HOST_WIDE_INT vol;
  data_reference_p dr;
  ddr_p dep;

  if (loop->inner)
    return;

  /* Find the outermost loop of the loop nest of loop (we require that
     there are no sibling loops inside the nest).  */
  nest = loop;
  while (1)
    {
      aloop = loop_outer (nest);

      if (aloop == current_loops->tree_root
          || aloop->inner->next)
        break;

      nest = aloop;
    }

  /* For each loop, determine the amount of data accessed in each iteration.
     We use this to estimate whether the reference is evicted from the
     cache before its reuse.  */
  find_loop_nest (nest, &vloops);
  n = VEC_length (loop_p, vloops);
  loop_data_size = XNEWVEC (unsigned, n);
  volume = volume_of_references (refs);
  i = n;
  while (i-- != 0)
    {
      loop_data_size[i] = volume;
      /* Bound the volume by the L2 cache size, since above this bound,
         all dependence distances are equivalent.  */
      if (volume > L2_CACHE_SIZE_BYTES)
        continue;

      aloop = VEC_index (loop_p, vloops, i);
      vol = estimated_loop_iterations_int (aloop, false);
      if (vol < 0)
        vol = expected_loop_iterations (aloop);
      volume *= vol;
    }

  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristics to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
        dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);

        if (dr)
          {
            ref->reuse_distance = volume;
            dr->aux = ref;
            VEC_safe_push (data_reference_p, heap, datarefs, dr);
          }
        else
          no_other_refs = false;
      }

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = (struct mem_ref *) dr->aux;
      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;

      if (no_other_refs)
        ref->independent_p = true;
    }

  compute_all_dependences (datarefs, &dependences, vloops, true);

  for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
        continue;

      ref = (struct mem_ref *) DDR_A (dep)->aux;
      refb = (struct mem_ref *) DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
          || DDR_NUM_DIST_VECTS (dep) == 0)
        {
          /* If the dependence cannot be analyzed, assume that there might be
             a reuse.  */
          dist = 0;

          ref->independent_p = false;
          refb->independent_p = false;
        }
      else
        {
          /* The distance vectors are normalized to be always lexicographically
             positive, hence we cannot tell just from them whether DDR_A comes
             before DDR_B or vice versa.  However, it is not important,
             anyway -- if DDR_A is close to DDR_B, then it is either reused in
             DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
             in cache (and marking it as nontemporal would not affect
             anything).  */

          dist = volume;
          for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
            {
              adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
                                             loop_data_size, n);

              /* If this is a dependence in the innermost loop (i.e., the
                 distances in all superloops are zero) and it is not
                 the trivial self-dependence with distance zero, record that
                 the references are not completely independent.  */
              if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
                  && (ref != refb
                      || DDR_DIST_VECT (dep, j)[n-1] != 0))
                {
                  ref->independent_p = false;
                  refb->independent_p = false;
                }

              /* Ignore accesses closer than
                 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
                 so that we use nontemporal prefetches e.g. if single memory
                 location is accessed several times in a single iteration of
                 the loop.  */
              if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
                continue;

              if (adist < dist)
                dist = adist;
            }
        }

      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
        refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
        for (ref = gr->refs; ref; ref = ref->next)
          fprintf (dump_file, " ref %p distance %u\n",
                   (void *) ref, ref->reuse_distance);
    }
}
/* Do a cost-benefit analysis to determine if prefetching is profitable
   for the current loop given the following parameters:
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count,
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   MEM_REF_COUNT: total number of memory references in the loop.  */

static bool
is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
                                unsigned ninsns, unsigned prefetch_count,
                                unsigned mem_ref_count, unsigned unroll_factor)
{
  int insn_to_mem_ratio, insn_to_prefetch_ratio;

  if (mem_ref_count == 0)
    return false;

  /* Prefetching improves performance by overlapping cache missing
     memory accesses with CPU operations.  If the loop does not have
     enough CPU operations to overlap with memory operations, prefetching
     won't give a significant benefit.  One approximate way of checking
     this is to require the ratio of instructions to memory references to
     be above a certain limit.  This approximation works well in practice.
     TODO: Implement a more precise computation by estimating the time
     for each CPU or memory op in the loop.  Time estimates for memory ops
     should account for cache misses.  */
  insn_to_mem_ratio = ninsns / mem_ref_count;

  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
                 insn_to_mem_ratio);
      return false;
    }

  /* Prefetching most likely causes performance degradation when the instruction
     to prefetch ratio is too small.  Too many prefetch instructions in a loop
     may reduce the I-cache performance.
     (unroll_factor * ninsns) is used to estimate the number of instructions in
     the unrolled loop.  This implementation is a bit simplistic -- the number
     of issued prefetch instructions is also affected by unrolling.  So,
     prefetch_mod and the unroll factor should be taken into account when
     determining prefetch_count.  Also, the number of insns of the unrolled
     loop will usually be significantly smaller than the number of insns of the
     original loop * unroll_factor (at least the induction variable increases
     and the exit branches will get eliminated), so it might be better to use
     tree_estimate_loop_size + estimated_unrolled_size.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
                 insn_to_prefetch_ratio);
      return false;
    }

  /* Could not do further estimation if the trip count is unknown.  Just assume
     prefetching is profitable.  Too aggressive???  */
  if (est_niter < 0)
    return true;

  if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- loop estimated to roll only %d times\n",
                 (int) est_niter);
      return false;
    }

  return true;
}
/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;
  unsigned prefetch_count;
  unsigned mem_ref_count;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "  ignored (cold area)\n");
      return false;
    }

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  prefetch_count = estimate_prefetch_count (refs);
  if (prefetch_count == 0)
    goto fail;

  determine_loop_nest_reuse (loop, refs, no_other_refs);

  /* Step 3: determine the ahead and unroll factor.  */

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  ahead = (PREFETCH_LATENCY + time - 1) / time;
  est_niter = estimated_loop_iterations_int (loop, false);

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
                                           est_niter);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
             HOST_WIDE_INT_PRINT_DEC "\n"
             "insn count %d, mem ref count %d, prefetch count %d\n",
             ahead, unroll_factor, est_niter,
             ninsns, mem_ref_count, prefetch_count);

  if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
                                       mem_ref_count, unroll_factor))
    goto fail;

  mark_nontemporal_stores (loop, refs);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
                        single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}
/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  loop_iterator li;
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!HAVE_prefetch
      /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
         -mtune=i486 causes us having PREFETCH_BLOCK 0, since this is part
         of processor costs and i486 does not have prefetch, but
         -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, "    simultaneous prefetches: %d\n",
               SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
               L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, "    min insn-to-prefetch ratio: %d \n",
               MIN_INSN_TO_PREFETCH_RATIO);
      fprintf (dump_file, "    min insn-to-mem ratio: %d \n",
               PREFETCH_MIN_INSN_TO_MEM_RATIO);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!built_in_decls[BUILT_IN_PREFETCH])
    {
      tree type = build_function_type (void_type_node,
                                       tree_cons (NULL_TREE,
                                                  const_ptr_type_node,
                                                  NULL_TREE));
      tree decl = add_builtin_function ("__builtin_prefetch", type,
                                        BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
                                        NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      built_in_decls[BUILT_IN_PREFETCH] = decl;
    }

  /* We assume that size of cache line is a power of two, so verify this
     here.  */
  gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);

  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}