gcc/tree-ssa-loop-prefetch.c
1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "output.h"
28 #include "tree-pretty-print.h"
29 #include "tree-flow.h"
30 #include "tree-dump.h"
31 #include "timevar.h"
32 #include "cfgloop.h"
33 #include "tree-pass.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "hashtab.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
39 #include "diagnostic-core.h"
40 #include "params.h"
41 #include "langhooks.h"
42 #include "tree-inline.h"
43 #include "tree-data-ref.h"
46 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
47 between the GIMPLE and RTL worlds. */
48 #include "expr.h"
49 #include "optabs.h"
51 /* This pass inserts prefetch instructions to optimize cache usage during
52 accesses to arrays in loops. It processes loops sequentially and:
54 1) Gathers all memory references in the loop.
55 2) For each of the references it decides when it is profitable to prefetch
56 it. To do so, we evaluate the reuse among the accesses and determine
57 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
58 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
59 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
60 iterations of the loop that are zero modulo PREFETCH_MOD). For example
61 (assuming cache line size is 64 bytes, char has size 1 byte and there
62 is no hardware sequential prefetch):
64 char *a;
65 for (i = 0; i < max; i++)
67 a[255] = ...; (0)
68 a[i] = ...; (1)
69 a[i + 64] = ...; (2)
70 a[16*i] = ...; (3)
71 a[187*i] = ...; (4)
72 a[187*i + 50] = ...; (5)
75 (0) obviously has PREFETCH_BEFORE 1
76 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
77 location 64 iterations before it, and PREFETCH_MOD 64 (since
78 it hits the same cache line otherwise).
79 (2) has PREFETCH_MOD 64
80 (3) has PREFETCH_MOD 4
81 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
82 the cache line accessed by (5) is the same with probability only
83 7/32.
84 (5) has PREFETCH_MOD 1 as well.
86 Additionally, we use data dependence analysis to determine for each
87 reference the distance till the first reuse; this information is used
88 to determine the temporality of the issued prefetch instruction.
90 3) We determine how much ahead we need to prefetch. The number of
91 iterations needed is time to fetch / time spent in one iteration of
92 the loop. The problem is that we do not know either of these values,
93 so we just make a heuristic guess based on a magic (possibly)
94 target-specific constant and the size of the loop.
96 4) Determine which of the references we prefetch. We take into account
97 that there is a maximum number of simultaneous prefetches (provided
98 by the machine description). We issue as many prefetches as possible
99 while still within this bound (starting with those with lowest
100 prefetch_mod, since they are responsible for most of the cache
101 misses).
103 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
104 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
105 prefetching nonaccessed memory.
106 TODO -- actually implement peeling.
108 6) We actually emit the prefetch instructions. ??? Perhaps emit the
109 prefetch instructions with guards in cases where 5) was not sufficient
110 to satisfy the constraints?
112 A cost model is implemented to determine whether or not prefetching is
113 profitable for a given loop. The cost model has three heuristics:
115 1. Function trip_count_to_ahead_ratio_too_small_p implements a
116 heuristic that determines whether or not the loop has too few
117 iterations (compared to ahead). Prefetching is not likely to be
118 beneficial if the trip count to ahead ratio is below a certain
119 minimum.
121 2. Function mem_ref_count_reasonable_p implements a heuristic that
122 determines whether the given loop has enough CPU ops that can be
123 overlapped with cache missing memory ops. If not, the loop
124 won't benefit from prefetching. In the implementation,
125 prefetching is not considered beneficial if the ratio between
126 the instruction count and the mem ref count is below a certain
127 minimum.
129 3. Function insn_to_prefetch_ratio_too_small_p implements a
130 heuristic that disables prefetching in a loop if the prefetching
131 cost is above a certain limit. The relative prefetching cost is
132 estimated by taking the ratio between the prefetch count and the
133 total instruction count (this models the I-cache cost).
135 The limits used in these heuristics are defined as parameters with
136 reasonable default values. Machine-specific default values will be
137 added later.
139 Some other TODO:
140 -- write and use more general reuse analysis (that could also be used
141 in other cache-aimed loop optimizations)
142 -- make it behave sanely together with the prefetches given by the user
143 (now we just ignore them; at the very least we should avoid
144 optimizing loops in which the user put their own prefetches)
145 -- we assume cache line size alignment of arrays; this could be
146 improved. */
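/* As a rough illustration of the intended end result (a sketch only, not
   code produced verbatim by this pass): for a loop such as

     double *a, sum;
     for (i = 0; i < n; i++)
       sum += a[i];

   with 8-byte elements and a 64-byte cache line, the reference a[i] gets
   PREFETCH_MOD 8, the loop is unrolled 8 times, and a single call along the
   lines of

     __builtin_prefetch (&a[i + ahead], 0, 3);

   is emitted per unrolled body (one prefetch per cache line), where "ahead"
   stands for the prefetch distance computed in loop_prefetch_arrays.  */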
148 /* Magic constants follow. These should be replaced by machine specific
149 numbers. */
151 /* True if write can be prefetched by a read prefetch. */
153 #ifndef WRITE_CAN_USE_READ_PREFETCH
154 #define WRITE_CAN_USE_READ_PREFETCH 1
155 #endif
157 /* True if read can be prefetched by a write prefetch. */
159 #ifndef READ_CAN_USE_WRITE_PREFETCH
160 #define READ_CAN_USE_WRITE_PREFETCH 0
161 #endif
163 /* The size of the block loaded by a single prefetch. Usually, this is
164 the same as cache line size (at the moment, we only consider one level
165 of cache hierarchy). */
167 #ifndef PREFETCH_BLOCK
168 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
169 #endif
171 /* Do we have forward hardware sequential prefetching? */
173 #ifndef HAVE_FORWARD_PREFETCH
174 #define HAVE_FORWARD_PREFETCH 0
175 #endif
177 /* Do we have backward hardware sequential prefetching? */
179 #ifndef HAVE_BACKWARD_PREFETCH
180 #define HAVE_BACKWARD_PREFETCH 0
181 #endif
183 /* In some cases we are only able to determine that there is a certain
184 probability that the two accesses hit the same cache line. In this
185 case, we issue the prefetches for both of them if this probability
186 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
188 #ifndef ACCEPTABLE_MISS_RATE
189 #define ACCEPTABLE_MISS_RATE 50
190 #endif
192 #ifndef HAVE_prefetch
193 #define HAVE_prefetch 0
194 #endif
196 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
197 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
199 /* We consider a memory access nontemporal if it is not reused sooner than
200 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
201 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
202 so that we use nontemporal prefetches e.g. if a single memory location
203 is accessed several times in a single iteration of the loop. */
204 #define NONTEMPORAL_FRACTION 16
206 /* In case we have to emit a memory fence instruction after the loop that
207 uses nontemporal stores, this defines the builtin to use. */
209 #ifndef FENCE_FOLLOWING_MOVNT
210 #define FENCE_FOLLOWING_MOVNT NULL_TREE
211 #endif
213 /* It is not profitable to prefetch when the trip count is not at
214 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
215 For example, in a loop with a prefetch ahead distance of 10,
216 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
217 profitable to prefetch when the trip count is greater than or equal to
218 40. In that case, 30 out of the 40 iterations will benefit from
219 prefetching. */
221 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
222 #define TRIP_COUNT_TO_AHEAD_RATIO 4
223 #endif
225 /* A group of references between which reuse may occur. */
227 struct mem_ref_group
229 tree base; /* Base of the reference. */
230 tree step; /* Step of the reference. */
231 struct mem_ref *refs; /* References in the group. */
232 struct mem_ref_group *next; /* Next group of references. */
235 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
237 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
239 /* Do not generate a prefetch if the unroll factor is significantly less
240 than what is required by the prefetch. This is to avoid redundant
241 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
242 2, prefetching requires unrolling the loop 16 times, but
243 the loop is actually unrolled twice. In this case (ratio = 8),
244 prefetching is not likely to be beneficial. */
246 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
247 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
248 #endif
250 /* Some of the prefetch computations have quadratic complexity. We want to
251 avoid huge compile times and, therefore, want to limit the amount of
252 memory references per loop where we consider prefetching. */
254 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
255 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
256 #endif
258 /* The memory reference. */
260 struct mem_ref
262 gimple stmt; /* Statement in which the reference appears. */
263 tree mem; /* The reference. */
264 HOST_WIDE_INT delta; /* Constant offset of the reference. */
265 struct mem_ref_group *group; /* The group of references it belongs to. */
266 unsigned HOST_WIDE_INT prefetch_mod;
267 /* Prefetch only each PREFETCH_MOD-th
268 iteration. */
269 unsigned HOST_WIDE_INT prefetch_before;
270 /* Prefetch only first PREFETCH_BEFORE
271 iterations. */
272 unsigned reuse_distance; /* The amount of data accessed before the first
273 reuse of this value. */
274 struct mem_ref *next; /* The next reference in the group. */
275 unsigned write_p : 1; /* Is it a write? */
276 unsigned independent_p : 1; /* True if the reference is independent of
277 all other references inside the loop. */
278 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
279 unsigned storent_p : 1; /* True if we changed the store to a
280 nontemporal one. */
283 /* Dumps information about reference REF to FILE. */
285 static void
286 dump_mem_ref (FILE *file, struct mem_ref *ref)
288 fprintf (file, "Reference %p:\n", (void *) ref);
290 fprintf (file, " group %p (base ", (void *) ref->group);
291 print_generic_expr (file, ref->group->base, TDF_SLIM);
292 fprintf (file, ", step ");
293 if (cst_and_fits_in_hwi (ref->group->step))
294 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
295 else
296 print_generic_expr (file, ref->group->step, TDF_TREE);
297 fprintf (file, ")\n");
299 fprintf (file, " delta ");
300 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
301 fprintf (file, "\n");
303 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
305 fprintf (file, "\n");
308 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
309 exist. */
311 static struct mem_ref_group *
312 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
314 struct mem_ref_group *group;
316 for (; *groups; groups = &(*groups)->next)
318 if (operand_equal_p ((*groups)->step, step, 0)
319 && operand_equal_p ((*groups)->base, base, 0))
320 return *groups;
322 /* If step is an integer constant, keep the list of groups sorted
323 by decreasing step. */
324 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
325 && int_cst_value ((*groups)->step) < int_cst_value (step))
326 break;
329 group = XNEW (struct mem_ref_group);
330 group->base = base;
331 group->step = step;
332 group->refs = NULL;
333 group->next = *groups;
334 *groups = group;
336 return group;
339 /* Records a memory reference MEM in GROUP with offset DELTA and write status
340 WRITE_P. The reference occurs in statement STMT. */
342 static void
343 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
344 HOST_WIDE_INT delta, bool write_p)
346 struct mem_ref **aref;
348 /* Do not record the same address twice. */
349 for (aref = &group->refs; *aref; aref = &(*aref)->next)
351 /* It need not be possible for a write reference to reuse a read
352 prefetch, or vice versa. */
353 if (!WRITE_CAN_USE_READ_PREFETCH
354 && write_p
355 && !(*aref)->write_p)
356 continue;
357 if (!READ_CAN_USE_WRITE_PREFETCH
358 && !write_p
359 && (*aref)->write_p)
360 continue;
362 if ((*aref)->delta == delta)
363 return;
366 (*aref) = XNEW (struct mem_ref);
367 (*aref)->stmt = stmt;
368 (*aref)->mem = mem;
369 (*aref)->delta = delta;
370 (*aref)->write_p = write_p;
371 (*aref)->prefetch_before = PREFETCH_ALL;
372 (*aref)->prefetch_mod = 1;
373 (*aref)->reuse_distance = 0;
374 (*aref)->issue_prefetch_p = false;
375 (*aref)->group = group;
376 (*aref)->next = NULL;
377 (*aref)->independent_p = false;
378 (*aref)->storent_p = false;
380 if (dump_file && (dump_flags & TDF_DETAILS))
381 dump_mem_ref (dump_file, *aref);
384 /* Release memory references in GROUPS. */
386 static void
387 release_mem_refs (struct mem_ref_group *groups)
389 struct mem_ref_group *next_g;
390 struct mem_ref *ref, *next_r;
392 for (; groups; groups = next_g)
394 next_g = groups->next;
395 for (ref = groups->refs; ref; ref = next_r)
397 next_r = ref->next;
398 free (ref);
400 free (groups);
404 /* A structure used to pass arguments to idx_analyze_ref. */
406 struct ar_data
408 struct loop *loop; /* Loop of the reference. */
409 gimple stmt; /* Statement of the reference. */
410 tree *step; /* Step of the memory reference. */
411 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
414 /* Analyzes a single INDEX of a memory reference to obtain information
415 described in analyze_ref. Callback for for_each_index. */
417 static bool
418 idx_analyze_ref (tree base, tree *index, void *data)
420 struct ar_data *ar_data = (struct ar_data *) data;
421 tree ibase, step, stepsize;
422 HOST_WIDE_INT idelta = 0, imult = 1;
423 affine_iv iv;
425 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
426 *index, &iv, true))
427 return false;
428 ibase = iv.base;
429 step = iv.step;
431 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
432 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
434 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
435 ibase = TREE_OPERAND (ibase, 0);
437 if (cst_and_fits_in_hwi (ibase))
439 idelta += int_cst_value (ibase);
440 ibase = build_int_cst (TREE_TYPE (ibase), 0);
443 if (TREE_CODE (base) == ARRAY_REF)
445 stepsize = array_ref_element_size (base);
446 if (!cst_and_fits_in_hwi (stepsize))
447 return false;
448 imult = int_cst_value (stepsize);
449 step = fold_build2 (MULT_EXPR, sizetype,
450 fold_convert (sizetype, step),
451 fold_convert (sizetype, stepsize));
452 idelta *= imult;
455 if (*ar_data->step == NULL_TREE)
456 *ar_data->step = step;
457 else
458 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
459 fold_convert (sizetype, *ar_data->step),
460 fold_convert (sizetype, step));
461 *ar_data->delta += idelta;
462 *index = ibase;
464 return true;
467 /* Tries to express REF_P in the form &BASE + STEP * iter + DELTA, where DELTA and
468 STEP are integer constants and iter is the number of iterations of LOOP. The
469 reference occurs in statement STMT. Strips nonaddressable component
470 references from REF_P. */
472 static bool
473 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
474 tree *step, HOST_WIDE_INT *delta,
475 gimple stmt)
477 struct ar_data ar_data;
478 tree off;
479 HOST_WIDE_INT bit_offset;
480 tree ref = *ref_p;
482 *step = NULL_TREE;
483 *delta = 0;
485 /* First strip off the component references. Ignore bitfields.
486 Also strip off the real and imaginary parts of a complex, so that
487 they can have the same base. */
488 if (TREE_CODE (ref) == REALPART_EXPR
489 || TREE_CODE (ref) == IMAGPART_EXPR
490 || (TREE_CODE (ref) == COMPONENT_REF
491 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
493 if (TREE_CODE (ref) == IMAGPART_EXPR)
494 *delta += int_size_in_bytes (TREE_TYPE (ref));
495 ref = TREE_OPERAND (ref, 0);
498 *ref_p = ref;
500 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
502 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
503 bit_offset = TREE_INT_CST_LOW (off);
504 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
506 *delta += bit_offset / BITS_PER_UNIT;
509 *base = unshare_expr (ref);
510 ar_data.loop = loop;
511 ar_data.stmt = stmt;
512 ar_data.step = step;
513 ar_data.delta = delta;
514 return for_each_index (base, idx_analyze_ref, &ar_data);
517 /* Record a memory reference REF to the list REFS. The reference occurs in
518 LOOP in statement STMT and it is a write if WRITE_P is true. Returns true if the
519 reference was recorded, false otherwise. */
521 static bool
522 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
523 tree ref, bool write_p, gimple stmt)
525 tree base, step;
526 HOST_WIDE_INT delta;
527 struct mem_ref_group *agrp;
529 if (get_base_address (ref) == NULL)
530 return false;
532 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
533 return false;
534 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
535 if (step == NULL_TREE)
536 return false;
538 /* Stop if the address of BASE could not be taken. */
539 if (may_be_nonaddressable_p (base))
540 return false;
542 /* Limit non-constant step prefetching only to the innermost loops. */
543 if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
544 return false;
546 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
547 are integer constants. */
548 agrp = find_or_create_group (refs, base, step);
549 record_ref (agrp, stmt, ref, delta, write_p);
551 return true;
554 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
555 true if there are no other memory references inside the loop. */
557 static struct mem_ref_group *
558 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
560 basic_block *body = get_loop_body_in_dom_order (loop);
561 basic_block bb;
562 unsigned i;
563 gimple_stmt_iterator bsi;
564 gimple stmt;
565 tree lhs, rhs;
566 struct mem_ref_group *refs = NULL;
568 *no_other_refs = true;
569 *ref_count = 0;
571 /* Scan the loop body in order, so that earlier references precede
572 later ones. */
573 for (i = 0; i < loop->num_nodes; i++)
575 bb = body[i];
576 if (bb->loop_father != loop)
577 continue;
579 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
581 stmt = gsi_stmt (bsi);
583 if (gimple_code (stmt) != GIMPLE_ASSIGN)
585 if (gimple_vuse (stmt)
586 || (is_gimple_call (stmt)
587 && !(gimple_call_flags (stmt) & ECF_CONST)))
588 *no_other_refs = false;
589 continue;
592 lhs = gimple_assign_lhs (stmt);
593 rhs = gimple_assign_rhs1 (stmt);
595 if (REFERENCE_CLASS_P (rhs))
597 *no_other_refs &= gather_memory_references_ref (loop, &refs,
598 rhs, false, stmt);
599 *ref_count += 1;
601 if (REFERENCE_CLASS_P (lhs))
603 *no_other_refs &= gather_memory_references_ref (loop, &refs,
604 lhs, true, stmt);
605 *ref_count += 1;
609 free (body);
611 return refs;
614 /* Prune the prefetch candidate REF using the self-reuse. */
616 static void
617 prune_ref_by_self_reuse (struct mem_ref *ref)
619 HOST_WIDE_INT step;
620 bool backward;
622 /* If the step size is non-constant, we cannot calculate prefetch_mod. */
623 if (!cst_and_fits_in_hwi (ref->group->step))
624 return;
626 step = int_cst_value (ref->group->step);
628 backward = step < 0;
630 if (step == 0)
632 /* Prefetch references to invariant address just once. */
633 ref->prefetch_before = 1;
634 return;
637 if (backward)
638 step = -step;
640 if (step > PREFETCH_BLOCK)
641 return;
643 if ((backward && HAVE_BACKWARD_PREFETCH)
644 || (!backward && HAVE_FORWARD_PREFETCH))
646 ref->prefetch_before = 1;
647 return;
650 ref->prefetch_mod = PREFETCH_BLOCK / step;
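/* E.g. (reusing the overview example above) a step of 16 bytes with a
   64-byte PREFETCH_BLOCK yields prefetch_mod == 4, which is how access (3)
   obtains its PREFETCH_MOD of 4.  */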
653 /* Divides X by BY, rounding down. */
655 static HOST_WIDE_INT
656 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
658 gcc_assert (by > 0);
660 if (x >= 0)
661 return x / by;
662 else
663 return (x + by - 1) / by;
666 /* Given a CACHE_LINE_SIZE and two inductive memory references
667 with a common STEP greater than CACHE_LINE_SIZE and an address
668 difference DELTA, compute the probability that they will fall
669 in different cache lines. Return true if the computed miss rate
670 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
671 number of distinct iterations after which the pattern repeats itself.
672 ALIGN_UNIT is the unit of alignment in bytes. */
674 static bool
675 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
676 HOST_WIDE_INT step, HOST_WIDE_INT delta,
677 unsigned HOST_WIDE_INT distinct_iters,
678 int align_unit)
680 unsigned align, iter;
681 int total_positions, miss_positions, max_allowed_miss_positions;
682 int address1, address2, cache_line1, cache_line2;
684 /* It always misses if delta is greater than or equal to the cache
685 line size. */
686 if (delta >= (HOST_WIDE_INT) cache_line_size)
687 return false;
689 miss_positions = 0;
690 total_positions = (cache_line_size / align_unit) * distinct_iters;
691 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
693 /* Iterate through all possible alignments of the first
694 memory reference within its cache line. */
695 for (align = 0; align < cache_line_size; align += align_unit)
697 /* Iterate through all distinct iterations. */
698 for (iter = 0; iter < distinct_iters; iter++)
700 address1 = align + step * iter;
701 address2 = address1 + delta;
702 cache_line1 = address1 / cache_line_size;
703 cache_line2 = address2 / cache_line_size;
704 if (cache_line1 != cache_line2)
706 miss_positions += 1;
707 if (miss_positions > max_allowed_miss_positions)
708 return false;
711 return true;
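/* For instance (illustrative numbers only): with a 64-byte cache line and a
   DELTA of 32 bytes, about half of the possible alignments place the two
   addresses in different cache lines.  That is far above the default
   ACCEPTABLE_MISS_RATE of 5%, so the function returns false and the caller
   keeps prefetching both references.  */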
714 /* Prune the prefetch candidate REF using the reuse with BY.
715 If BY_IS_BEFORE is true, BY is before REF in the loop. */
717 static void
718 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
719 bool by_is_before)
721 HOST_WIDE_INT step;
722 bool backward;
723 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
724 HOST_WIDE_INT delta = delta_b - delta_r;
725 HOST_WIDE_INT hit_from;
726 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
727 HOST_WIDE_INT reduced_step;
728 unsigned HOST_WIDE_INT reduced_prefetch_block;
729 tree ref_type;
730 int align_unit;
732 /* If the step is non-constant, we cannot calculate prefetch_before. */
733 if (!cst_and_fits_in_hwi (ref->group->step)) {
734 return;
737 step = int_cst_value (ref->group->step);
739 backward = step < 0;
742 if (delta == 0)
744 /* If the references have the same address, only prefetch the
745 former. */
746 if (by_is_before)
747 ref->prefetch_before = 0;
749 return;
752 if (!step)
754 /* If the reference addresses are invariant and fall into the
755 same cache line, prefetch just the first one. */
756 if (!by_is_before)
757 return;
759 if (ddown (ref->delta, PREFETCH_BLOCK)
760 != ddown (by->delta, PREFETCH_BLOCK))
761 return;
763 ref->prefetch_before = 0;
764 return;
767 /* Only prune the reference that is behind in the array. */
768 if (backward)
770 if (delta > 0)
771 return;
773 /* Transform the data so that we may assume that the accesses
774 are forward. */
775 delta = - delta;
776 step = -step;
777 delta_r = PREFETCH_BLOCK - 1 - delta_r;
778 delta_b = PREFETCH_BLOCK - 1 - delta_b;
780 else
782 if (delta < 0)
783 return;
786 /* Check whether the two references are likely to hit the same cache
787 line, and how far apart the iterations in which that occurs are from
788 each other. */
790 if (step <= PREFETCH_BLOCK)
792 /* The accesses are sure to meet. Let us check when. */
793 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
794 prefetch_before = (hit_from - delta_r + step - 1) / step;
796 /* Do not reduce prefetch_before if we meet beyond cache size. */
797 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
798 prefetch_before = PREFETCH_ALL;
799 if (prefetch_before < ref->prefetch_before)
800 ref->prefetch_before = prefetch_before;
802 return;
805 /* A more complicated case with step > prefetch_block. First reduce
806 the ratio between the step and the cache line size to its simplest
807 terms. The resulting denominator will then represent the number of
808 distinct iterations after which each address will go back to its
809 initial location within the cache line. This computation assumes
810 that PREFETCH_BLOCK is a power of two. */
811 prefetch_block = PREFETCH_BLOCK;
812 reduced_prefetch_block = prefetch_block;
813 reduced_step = step;
814 while ((reduced_step & 1) == 0
815 && reduced_prefetch_block > 1)
817 reduced_step >>= 1;
818 reduced_prefetch_block >>= 1;
821 prefetch_before = delta / step;
822 delta %= step;
823 ref_type = TREE_TYPE (ref->mem);
824 align_unit = TYPE_ALIGN (ref_type) / 8;
825 if (is_miss_rate_acceptable (prefetch_block, step, delta,
826 reduced_prefetch_block, align_unit))
828 /* Do not reduce prefetch_before if we meet beyond cache size. */
829 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
830 prefetch_before = PREFETCH_ALL;
831 if (prefetch_before < ref->prefetch_before)
832 ref->prefetch_before = prefetch_before;
834 return;
837 /* Try also the following iteration. */
838 prefetch_before++;
839 delta = step - delta;
840 if (is_miss_rate_acceptable (prefetch_block, step, delta,
841 reduced_prefetch_block, align_unit))
843 if (prefetch_before < ref->prefetch_before)
844 ref->prefetch_before = prefetch_before;
846 return;
849 /* REF probably does not reuse BY. */
850 return;
853 /* Prune the prefetch candidate REF using the reuses with other references
854 in REFS. */
856 static void
857 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
859 struct mem_ref *prune_by;
860 bool before = true;
862 prune_ref_by_self_reuse (ref);
864 for (prune_by = refs; prune_by; prune_by = prune_by->next)
866 if (prune_by == ref)
868 before = false;
869 continue;
872 if (!WRITE_CAN_USE_READ_PREFETCH
873 && ref->write_p
874 && !prune_by->write_p)
875 continue;
876 if (!READ_CAN_USE_WRITE_PREFETCH
877 && !ref->write_p
878 && prune_by->write_p)
879 continue;
881 prune_ref_by_group_reuse (ref, prune_by, before);
885 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
887 static void
888 prune_group_by_reuse (struct mem_ref_group *group)
890 struct mem_ref *ref_pruned;
892 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
894 prune_ref_by_reuse (ref_pruned, group->refs);
896 if (dump_file && (dump_flags & TDF_DETAILS))
898 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
900 if (ref_pruned->prefetch_before == PREFETCH_ALL
901 && ref_pruned->prefetch_mod == 1)
902 fprintf (dump_file, " no restrictions");
903 else if (ref_pruned->prefetch_before == 0)
904 fprintf (dump_file, " do not prefetch");
905 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
906 fprintf (dump_file, " prefetch once");
907 else
909 if (ref_pruned->prefetch_before != PREFETCH_ALL)
911 fprintf (dump_file, " prefetch before ");
912 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
913 ref_pruned->prefetch_before);
915 if (ref_pruned->prefetch_mod != 1)
917 fprintf (dump_file, " prefetch mod ");
918 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
919 ref_pruned->prefetch_mod);
922 fprintf (dump_file, "\n");
927 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
929 static void
930 prune_by_reuse (struct mem_ref_group *groups)
932 for (; groups; groups = groups->next)
933 prune_group_by_reuse (groups);
936 /* Returns true if we should issue prefetch for REF. */
938 static bool
939 should_issue_prefetch_p (struct mem_ref *ref)
941 /* For now do not issue prefetches for only the first few
942 iterations. */
943 if (ref->prefetch_before != PREFETCH_ALL)
945 if (dump_file && (dump_flags & TDF_DETAILS))
946 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
947 (void *) ref);
948 return false;
951 /* Do not prefetch nontemporal stores. */
952 if (ref->storent_p)
954 if (dump_file && (dump_flags & TDF_DETAILS))
955 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
956 return false;
959 return true;
962 /* Decide which of the prefetch candidates in GROUPS to prefetch.
963 AHEAD is the number of iterations to prefetch ahead (which corresponds
964 to the number of simultaneous instances of one prefetch running at a
965 time). UNROLL_FACTOR is the factor by which the loop is going to be
966 unrolled. Returns true if there is anything to prefetch. */
968 static bool
969 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
970 unsigned ahead)
972 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
973 unsigned slots_per_prefetch;
974 struct mem_ref *ref;
975 bool any = false;
977 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
978 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
980 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
981 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
982 it will need a prefetch slot. */
983 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
984 if (dump_file && (dump_flags & TDF_DETAILS))
985 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
986 slots_per_prefetch);
988 /* For now we just take memory references one by one and issue
989 prefetches for as many as possible. The groups are sorted
990 starting with the largest step, since the references with
991 large step are more likely to cause many cache misses. */
993 for (; groups; groups = groups->next)
994 for (ref = groups->refs; ref; ref = ref->next)
996 if (!should_issue_prefetch_p (ref))
997 continue;
999 /* The loop is far from being sufficiently unrolled for this
1000 prefetch. Do not generate the prefetch to avoid many redundant
1001 prefetches. */
1002 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1003 continue;
1005 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1006 and we unroll the loop UNROLL_FACTOR times, we need to insert
1007 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1008 iteration. */
1009 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1010 / ref->prefetch_mod);
1011 prefetch_slots = n_prefetches * slots_per_prefetch;
1013 /* If more than half of the prefetches would be lost anyway, do not
1014 issue the prefetch. */
1015 if (2 * remaining_prefetch_slots < prefetch_slots)
1016 continue;
1018 ref->issue_prefetch_p = true;
1020 if (remaining_prefetch_slots <= prefetch_slots)
1021 return true;
1022 remaining_prefetch_slots -= prefetch_slots;
1023 any = true;
1026 return any;
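/* A small worked example of the bookkeeping above (assumed numbers): with
   AHEAD == 8 and UNROLL_FACTOR == 4, each prefetch instruction occupies
   slots_per_prefetch == (8 + 4/2) / 4 == 2 of the SIMULTANEOUS_PREFETCHES
   slots, and a reference with prefetch_mod == 2 needs n_prefetches == 2
   instructions per unrolled body, i.e. 4 slots.  */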
1029 /* Return TRUE if no prefetch is going to be generated in the given
1030 GROUPS. */
1032 static bool
1033 nothing_to_prefetch_p (struct mem_ref_group *groups)
1035 struct mem_ref *ref;
1037 for (; groups; groups = groups->next)
1038 for (ref = groups->refs; ref; ref = ref->next)
1039 if (should_issue_prefetch_p (ref))
1040 return false;
1042 return true;
1045 /* Estimate the number of prefetches in the given GROUPS.
1046 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1048 static int
1049 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1051 struct mem_ref *ref;
1052 unsigned n_prefetches;
1053 int prefetch_count = 0;
1055 for (; groups; groups = groups->next)
1056 for (ref = groups->refs; ref; ref = ref->next)
1057 if (should_issue_prefetch_p (ref))
1059 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1060 / ref->prefetch_mod);
1061 prefetch_count += n_prefetches;
1064 return prefetch_count;
1067 /* Issue prefetches for the reference REF into the loop as decided before.
1068 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1069 is the factor by which LOOP was unrolled. */
1071 static void
1072 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1074 HOST_WIDE_INT delta;
1075 tree addr, addr_base, write_p, local, forward;
1076 gimple prefetch;
1077 gimple_stmt_iterator bsi;
1078 unsigned n_prefetches, ap;
1079 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1081 if (dump_file && (dump_flags & TDF_DETAILS))
1082 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1083 nontemporal ? " nontemporal" : "",
1084 (void *) ref);
1086 bsi = gsi_for_stmt (ref->stmt);
1088 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1089 / ref->prefetch_mod);
1090 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1091 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1092 true, NULL, true, GSI_SAME_STMT);
1093 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1094 local = nontemporal ? integer_zero_node : integer_three_node;
1096 for (ap = 0; ap < n_prefetches; ap++)
1098 if (cst_and_fits_in_hwi (ref->group->step))
1100 /* Determine the address to prefetch. */
1101 delta = (ahead + ap * ref->prefetch_mod) *
1102 int_cst_value (ref->group->step);
1103 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1104 addr_base, size_int (delta));
1105 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1106 true, GSI_SAME_STMT);
1108 else
1110 /* The step size is non-constant but loop-invariant. We use the
1111 heuristic of simply prefetching AHEAD iterations ahead. */
1112 forward = fold_build2 (MULT_EXPR, sizetype,
1113 fold_convert (sizetype, ref->group->step),
1114 fold_convert (sizetype, size_int (ahead)));
1115 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
1116 forward);
1117 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1118 NULL, true, GSI_SAME_STMT);
1120 /* Create the prefetch instruction. */
1121 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1122 3, addr, write_p, local);
1123 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1127 /* Issue prefetches for the references in GROUPS into the loop as decided before.
1128 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
1129 factor by which LOOP was unrolled. */
1131 static void
1132 issue_prefetches (struct mem_ref_group *groups,
1133 unsigned unroll_factor, unsigned ahead)
1135 struct mem_ref *ref;
1137 for (; groups; groups = groups->next)
1138 for (ref = groups->refs; ref; ref = ref->next)
1139 if (ref->issue_prefetch_p)
1140 issue_prefetch_ref (ref, unroll_factor, ahead);
1143 /* Returns true if REF is a memory write for which a nontemporal store insn
1144 can be used. */
1146 static bool
1147 nontemporal_store_p (struct mem_ref *ref)
1149 enum machine_mode mode;
1150 enum insn_code code;
1152 /* REF must be a write that is not reused. We require it to be independent
1153 of all other memory references in the loop, as the nontemporal stores may
1154 be reordered with respect to other memory references. */
1155 if (!ref->write_p
1156 || !ref->independent_p
1157 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1158 return false;
1160 /* Check that we have the storent instruction for the mode. */
1161 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1162 if (mode == BLKmode)
1163 return false;
1165 code = optab_handler (storent_optab, mode);
1166 return code != CODE_FOR_nothing;
1169 /* If REF is a nontemporal store, we mark the corresponding modify statement
1170 and return true. Otherwise, we return false. */
1172 static bool
1173 mark_nontemporal_store (struct mem_ref *ref)
1175 if (!nontemporal_store_p (ref))
1176 return false;
1178 if (dump_file && (dump_flags & TDF_DETAILS))
1179 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1180 (void *) ref);
1182 gimple_assign_set_nontemporal_move (ref->stmt, true);
1183 ref->storent_p = true;
1185 return true;
1188 /* Issue a memory fence instruction after LOOP. */
1190 static void
1191 emit_mfence_after_loop (struct loop *loop)
1193 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1194 edge exit;
1195 gimple call;
1196 gimple_stmt_iterator bsi;
1197 unsigned i;
1199 FOR_EACH_VEC_ELT (edge, exits, i, exit)
1201 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1203 if (!single_pred_p (exit->dest)
1204 /* If possible, we prefer not to insert the fence on other paths
1205 in the CFG. */
1206 && !(exit->flags & EDGE_ABNORMAL))
1207 split_loop_exit_edge (exit);
1208 bsi = gsi_after_labels (exit->dest);
1210 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1211 mark_virtual_ops_for_renaming (call);
1214 VEC_free (edge, heap, exits);
1215 update_ssa (TODO_update_ssa_only_virtuals);
1218 /* Returns true if we can use storent in LOOP, false otherwise. */
1220 static bool
1221 may_use_storent_in_loop_p (struct loop *loop)
1223 bool ret = true;
1225 if (loop->inner != NULL)
1226 return false;
1228 /* If we must issue an mfence insn after using storent, check that there
1229 is a suitable place for it at each of the loop exits. */
1230 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1232 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1233 unsigned i;
1234 edge exit;
1236 FOR_EACH_VEC_ELT (edge, exits, i, exit)
1237 if ((exit->flags & EDGE_ABNORMAL)
1238 && exit->dest == EXIT_BLOCK_PTR)
1239 ret = false;
1241 VEC_free (edge, heap, exits);
1244 return ret;
1247 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1248 references in the loop. */
1250 static void
1251 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1253 struct mem_ref *ref;
1254 bool any = false;
1256 if (!may_use_storent_in_loop_p (loop))
1257 return;
1259 for (; groups; groups = groups->next)
1260 for (ref = groups->refs; ref; ref = ref->next)
1261 any |= mark_nontemporal_store (ref);
1263 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1264 emit_mfence_after_loop (loop);
1267 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1268 this is the case, fills in DESC with the description of the number of
1269 iterations. */
1271 static bool
1272 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1273 unsigned factor)
1275 if (!can_unroll_loop_p (loop, factor, desc))
1276 return false;
1278 /* We only consider loops without control flow for unrolling. This is not
1279 a hard restriction -- tree_unroll_loop works with arbitrary loops
1280 as well; but the unrolling/prefetching is usually more profitable for
1281 loops consisting of a single basic block, and we want to limit the
1282 code growth. */
1283 if (loop->num_nodes > 2)
1284 return false;
1286 return true;
1289 /* Determine the factor by which to unroll LOOP, from the information
1290 contained in the list of memory references REFS. The description of the
1291 number of iterations of LOOP is stored to DESC. NINSNS is the number of
1292 insns of the LOOP. EST_NITER is the estimated number of iterations of
1293 the loop, or -1 if no estimate is available. */
1295 static unsigned
1296 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1297 unsigned ninsns, struct tree_niter_desc *desc,
1298 HOST_WIDE_INT est_niter)
1300 unsigned upper_bound;
1301 unsigned nfactor, factor, mod_constraint;
1302 struct mem_ref_group *agp;
1303 struct mem_ref *ref;
1305 /* First check whether the loop is not too large to unroll. We ignore
1306 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1307 from unrolling them enough to make exactly one cache line covered by each
1308 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1309 us from unrolling the loops too many times in cases where we only expect
1310 gains from better scheduling and decreasing loop overhead, which is not
1311 the case here. */
1312 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1314 /* If we unrolled the loop more times than it iterates, the unrolled version
1315 of the loop would never be entered. */
1316 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1317 upper_bound = est_niter;
1319 if (upper_bound <= 1)
1320 return 1;
1322 /* Choose the factor so that we may prefetch each cache line just once,
1323 but bound the unrolling by UPPER_BOUND. */
1324 factor = 1;
1325 for (agp = refs; agp; agp = agp->next)
1326 for (ref = agp->refs; ref; ref = ref->next)
1327 if (should_issue_prefetch_p (ref))
1329 mod_constraint = ref->prefetch_mod;
1330 nfactor = least_common_multiple (mod_constraint, factor);
1331 if (nfactor <= upper_bound)
1332 factor = nfactor;
1335 if (!should_unroll_loop_p (loop, desc, factor))
1336 return 1;
1338 return factor;
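/* For example (assumed values): with two prefetched references whose
   prefetch_mod values are 2 and 8, the factor grows as lcm (2, 1) == 2 and
   then lcm (8, 2) == 8, provided 8 does not exceed UPPER_BOUND; the loop is
   then unrolled 8 times so that each reference is prefetched once per cache
   line.  */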
1341 /* Returns the total volume of the memory references REFS, taking into account
1342 reuses in the innermost loop and cache line size. TODO -- we should also
1343 take into account reuses across the iterations of the loops in the loop
1344 nest. */
1346 static unsigned
1347 volume_of_references (struct mem_ref_group *refs)
1349 unsigned volume = 0;
1350 struct mem_ref_group *gr;
1351 struct mem_ref *ref;
1353 for (gr = refs; gr; gr = gr->next)
1354 for (ref = gr->refs; ref; ref = ref->next)
1356 /* Almost always reuses another value? */
1357 if (ref->prefetch_before != PREFETCH_ALL)
1358 continue;
1360 /* If several iterations access the same cache line, use the size of
1361 the line divided by this number. Otherwise, a cache line is
1362 accessed in each iteration. TODO -- in the latter case, we should
1363 take the size of the reference into account, rounding it up on cache
1364 line size multiple. */
1365 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1367 return volume;
1370 /* Returns the volume of memory references accessed across VEC iterations of
1371 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1372 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1374 static unsigned
1375 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1377 unsigned i;
1379 for (i = 0; i < n; i++)
1380 if (vec[i] != 0)
1381 break;
1383 if (i == n)
1384 return 0;
1386 gcc_assert (vec[i] > 0);
1388 /* We ignore the parts of the distance vector in subloops, since usually
1389 the numbers of iterations are much smaller. */
1390 return loop_sizes[i] * vec[i];
1393 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1394 at the position corresponding to the loop of the step. N is the depth
1395 of the considered loop nest, and LOOP is its innermost loop. */
1397 static void
1398 add_subscript_strides (tree access_fn, unsigned stride,
1399 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1401 struct loop *aloop;
1402 tree step;
1403 HOST_WIDE_INT astep;
1404 unsigned min_depth = loop_depth (loop) - n;
1406 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1408 aloop = get_chrec_loop (access_fn);
1409 step = CHREC_RIGHT (access_fn);
1410 access_fn = CHREC_LEFT (access_fn);
1412 if ((unsigned) loop_depth (aloop) <= min_depth)
1413 continue;
1415 if (host_integerp (step, 0))
1416 astep = tree_low_cst (step, 0);
1417 else
1418 astep = L1_CACHE_LINE_SIZE;
1420 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1425 /* Returns the volume of memory references accessed between two consecutive
1426 self-reuses of the reference DR. We consider the subscripts of DR in N
1427 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1428 loops. LOOP is the innermost loop of the current loop nest. */
1430 static unsigned
1431 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1432 struct loop *loop)
1434 tree stride, access_fn;
1435 HOST_WIDE_INT *strides, astride;
1436 VEC (tree, heap) *access_fns;
1437 tree ref = DR_REF (dr);
1438 unsigned i, ret = ~0u;
1440 /* In the following example:
1442 for (i = 0; i < N; i++)
1443 for (j = 0; j < N; j++)
1444 use (a[j][i]);
1445 the same cache line is accessed each N steps (except if the change from
1446 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1447 we cannot rely purely on the results of the data dependence analysis.
1449 Instead, we compute the stride of the reference in each loop, and consider
1450 the innermost loop in which the stride is less than the cache size. */
1452 strides = XCNEWVEC (HOST_WIDE_INT, n);
1453 access_fns = DR_ACCESS_FNS (dr);
1455 FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
1457 /* Keep track of the reference corresponding to the subscript, so that we
1458 know its stride. */
1459 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1460 ref = TREE_OPERAND (ref, 0);
1462 if (TREE_CODE (ref) == ARRAY_REF)
1464 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1465 if (host_integerp (stride, 1))
1466 astride = tree_low_cst (stride, 1);
1467 else
1468 astride = L1_CACHE_LINE_SIZE;
1470 ref = TREE_OPERAND (ref, 0);
1472 else
1473 astride = 1;
1475 add_subscript_strides (access_fn, astride, strides, n, loop);
1478 for (i = n; i-- > 0; )
1480 unsigned HOST_WIDE_INT s;
1482 s = strides[i] < 0 ? -strides[i] : strides[i];
1484 if (s < (unsigned) L1_CACHE_LINE_SIZE
1485 && (loop_sizes[i]
1486 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1488 ret = loop_sizes[i];
1489 break;
1493 free (strides);
1494 return ret;
1497 /* Determines the distance till the first reuse of each reference in REFS
1498 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1499 memory references in the loop. */
1501 static void
1502 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1503 bool no_other_refs)
1505 struct loop *nest, *aloop;
1506 VEC (data_reference_p, heap) *datarefs = NULL;
1507 VEC (ddr_p, heap) *dependences = NULL;
1508 struct mem_ref_group *gr;
1509 struct mem_ref *ref, *refb;
1510 VEC (loop_p, heap) *vloops = NULL;
1511 unsigned *loop_data_size;
1512 unsigned i, j, n;
1513 unsigned volume, dist, adist;
1514 HOST_WIDE_INT vol;
1515 data_reference_p dr;
1516 ddr_p dep;
1518 if (loop->inner)
1519 return;
1521 /* Find the outermost loop of the loop nest of LOOP (we require that
1522 there are no sibling loops inside the nest). */
1523 nest = loop;
1524 while (1)
1526 aloop = loop_outer (nest);
1528 if (aloop == current_loops->tree_root
1529 || aloop->inner->next)
1530 break;
1532 nest = aloop;
1535 /* For each loop, determine the amount of data accessed in each iteration.
1536 We use this to estimate whether the reference is evicted from the
1537 cache before its reuse. */
1538 find_loop_nest (nest, &vloops);
1539 n = VEC_length (loop_p, vloops);
1540 loop_data_size = XNEWVEC (unsigned, n);
1541 volume = volume_of_references (refs);
1542 i = n;
1543 while (i-- != 0)
1545 loop_data_size[i] = volume;
1546 /* Bound the volume by the L2 cache size, since above this bound,
1547 all dependence distances are equivalent. */
1548 if (volume > L2_CACHE_SIZE_BYTES)
1549 continue;
1551 aloop = VEC_index (loop_p, vloops, i);
1552 vol = estimated_loop_iterations_int (aloop, false);
1553 if (vol < 0)
1554 vol = expected_loop_iterations (aloop);
1555 volume *= vol;
1558 /* Prepare the references in the form suitable for data dependence
1559 analysis. We ignore unanalyzable data references (the results
1560 are used just as a heuristic to estimate the temporality of the
1561 references, hence we do not need to worry about correctness). */
1562 for (gr = refs; gr; gr = gr->next)
1563 for (ref = gr->refs; ref; ref = ref->next)
1565 dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1566 ref->mem, ref->stmt, !ref->write_p);
1568 if (dr)
1570 ref->reuse_distance = volume;
1571 dr->aux = ref;
1572 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1574 else
1575 no_other_refs = false;
1578 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1580 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1581 ref = (struct mem_ref *) dr->aux;
1582 if (ref->reuse_distance > dist)
1583 ref->reuse_distance = dist;
1585 if (no_other_refs)
1586 ref->independent_p = true;
1589 compute_all_dependences (datarefs, &dependences, vloops, true);
1591 FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
1593 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1594 continue;
1596 ref = (struct mem_ref *) DDR_A (dep)->aux;
1597 refb = (struct mem_ref *) DDR_B (dep)->aux;
1599 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1600 || DDR_NUM_DIST_VECTS (dep) == 0)
1602 /* If the dependence cannot be analyzed, assume that there might be
1603 a reuse. */
1604 dist = 0;
1606 ref->independent_p = false;
1607 refb->independent_p = false;
1609 else
1611 /* The distance vectors are normalized to be always lexicographically
1612 positive, hence we cannot tell just from them whether DDR_A comes
1613 before DDR_B or vice versa. However, it is not important,
1614 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1615 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1616 in cache (and marking it as nontemporal would not affect
1617 anything). */
1619 dist = volume;
1620 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1622 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1623 loop_data_size, n);
1625 /* If this is a dependence in the innermost loop (i.e., the
1626 distances in all superloops are zero) and it is not
1627 the trivial self-dependence with distance zero, record that
1628 the references are not completely independent. */
1629 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1630 && (ref != refb
1631 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1633 ref->independent_p = false;
1634 refb->independent_p = false;
1637 /* Ignore accesses closer than
1638 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1639 so that we use nontemporal prefetches e.g. if single memory
1640 location is accessed several times in a single iteration of
1641 the loop. */
1642 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1643 continue;
1645 if (adist < dist)
1646 dist = adist;
1650 if (ref->reuse_distance > dist)
1651 ref->reuse_distance = dist;
1652 if (refb->reuse_distance > dist)
1653 refb->reuse_distance = dist;
1656 free_dependence_relations (dependences);
1657 free_data_refs (datarefs);
1658 free (loop_data_size);
1660 if (dump_file && (dump_flags & TDF_DETAILS))
1662 fprintf (dump_file, "Reuse distances:\n");
1663 for (gr = refs; gr; gr = gr->next)
1664 for (ref = gr->refs; ref; ref = ref->next)
1665 fprintf (dump_file, " ref %p distance %u\n",
1666 (void *) ref, ref->reuse_distance);
1670 /* Determine whether or not the trip count to ahead ratio is too small based
1671 on profitability considerations.
1672 AHEAD: the iteration ahead distance,
1673 EST_NITER: the estimated trip count. */
1675 static bool
1676 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1678 /* Assume trip count to ahead ratio is big enough if the trip count could not
1679 be estimated at compile time. */
1680 if (est_niter < 0)
1681 return false;
1683 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1685 if (dump_file && (dump_flags & TDF_DETAILS))
1686 fprintf (dump_file,
1687 "Not prefetching -- loop estimated to roll only %d times\n",
1688 (int) est_niter);
1689 return true;
1692 return false;
1695 /* Determine whether or not the number of memory references in the loop is
1696 reasonable based on profitability and compilation time considerations.
1697 NINSNS: estimated number of instructions in the loop,
1698 MEM_REF_COUNT: total number of memory references in the loop. */
1700 static bool
1701 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1703 int insn_to_mem_ratio;
1705 if (mem_ref_count == 0)
1706 return false;
1708 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1709 (compute_all_dependences) have high costs based on quadratic complexity.
1710 To avoid huge compilation time, we give up prefetching if mem_ref_count
1711 is too large. */
1712 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1713 return false;
1715 /* Prefetching improves performance by overlapping cache missing
1716 memory accesses with CPU operations. If the loop does not have
1717 enough CPU operations to overlap with memory operations, prefetching
1718 won't give a significant benefit. One approximate way of checking
1719 this is to require the ratio of instructions to memory references to
1720 be above a certain limit. This approximation works well in practice.
1721 TODO: Implement a more precise computation by estimating the time
1722 for each CPU or memory op in the loop. Time estimates for memory ops
1723 should account for cache misses. */
1724 insn_to_mem_ratio = ninsns / mem_ref_count;
1726 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1728 if (dump_file && (dump_flags & TDF_DETAILS))
1729 fprintf (dump_file,
1730 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1731 insn_to_mem_ratio);
1732 return false;
1735 return true;
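/* For instance (assumed numbers): a loop with 40 insns and 10 memory
   references has insn_to_mem_ratio == 4; whether that is enough depends on
   the PREFETCH_MIN_INSN_TO_MEM_RATIO parameter.  */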
1738 /* Determine whether or not the instruction to prefetch ratio in the loop is
1739 too small based on profitability considerations.
1740 NINSNS: estimated number of instructions in the loop,
1741 PREFETCH_COUNT: an estimate of the number of prefetches,
1742 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1744 static bool
1745 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1746 unsigned unroll_factor)
1748 int insn_to_prefetch_ratio;
1750 /* Prefetching most likely causes performance degradation when the instruction
1751 to prefetch ratio is too small. Too many prefetch instructions in a loop
1752 may reduce the I-cache performance.
1753 (unroll_factor * ninsns) is used to estimate the number of instructions in
1754 the unrolled loop. This implementation is a bit simplistic -- the number
1755 of issued prefetch instructions is also affected by unrolling. So,
1756 prefetch_mod and the unroll factor should be taken into account when
1757 determining prefetch_count. Also, the number of insns of the unrolled
1758 loop will usually be significantly smaller than the number of insns of the
1759 original loop * unroll_factor (at least the induction variable increases
1760 and the exit branches will get eliminated), so it might be better to use
1761 tree_estimate_loop_size + estimated_unrolled_size. */
1762 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1763 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1765 if (dump_file && (dump_flags & TDF_DETAILS))
1766 fprintf (dump_file,
1767 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1768 insn_to_prefetch_ratio);
1769 return true;
1772 return false;
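/* For instance (assumed numbers): with UNROLL_FACTOR == 4, NINSNS == 20 and
   PREFETCH_COUNT == 10, insn_to_prefetch_ratio == (4 * 20) / 10 == 8; the
   loop is rejected only if this falls below MIN_INSN_TO_PREFETCH_RATIO.  */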
1776 /* Issue prefetch instructions for array references in LOOP. Returns
1777 true if the LOOP was unrolled. */
1779 static bool
1780 loop_prefetch_arrays (struct loop *loop)
1782 struct mem_ref_group *refs;
1783 unsigned ahead, ninsns, time, unroll_factor;
1784 HOST_WIDE_INT est_niter;
1785 struct tree_niter_desc desc;
1786 bool unrolled = false, no_other_refs;
1787 unsigned prefetch_count;
1788 unsigned mem_ref_count;
1790 if (optimize_loop_nest_for_size_p (loop))
1792 if (dump_file && (dump_flags & TDF_DETAILS))
1793 fprintf (dump_file, " ignored (cold area)\n");
1794 return false;
1797 /* FIXME: the time should be weighted by the probabilities of the blocks in
1798 the loop body. */
1799 time = tree_num_loop_insns (loop, &eni_time_weights);
1800 if (time == 0)
1801 return false;
1803 ahead = (PREFETCH_LATENCY + time - 1) / time;
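/* E.g. (assumed numbers) a prefetch latency of 200 and a loop body costing
   25 time units give ahead == 8 iterations.  */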
1804 est_niter = estimated_loop_iterations_int (loop, false);
1806 /* Prefetching is not likely to be profitable if the trip count to ahead
1807 ratio is too small. */
1808 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1809 return false;
1811 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1813 /* Step 1: gather the memory references. */
1814 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1816 /* Give up prefetching if the number of memory references in the
1817 loop is not reasonable based on profitability and compilation time
1818 considerations. */
1819 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1820 goto fail;
1822 /* Step 2: estimate the reuse effects. */
1823 prune_by_reuse (refs);
1825 if (nothing_to_prefetch_p (refs))
1826 goto fail;
1828 determine_loop_nest_reuse (loop, refs, no_other_refs);
1830 /* Step 3: determine unroll factor. */
1831 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1832 est_niter);
1834 /* Estimate prefetch count for the unrolled loop. */
1835 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1836 if (prefetch_count == 0)
1837 goto fail;
1839 if (dump_file && (dump_flags & TDF_DETAILS))
1840 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1841 HOST_WIDE_INT_PRINT_DEC "\n"
1842 "insn count %d, mem ref count %d, prefetch count %d\n",
1843 ahead, unroll_factor, est_niter,
1844 ninsns, mem_ref_count, prefetch_count);
1846 /* Prefetching is not likely to be profitable if the instruction to prefetch
1847 ratio is too small. */
1848 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1849 unroll_factor))
1850 goto fail;
1852 mark_nontemporal_stores (loop, refs);
1854 /* Step 4: what to prefetch? */
1855 if (!schedule_prefetches (refs, unroll_factor, ahead))
1856 goto fail;
1858 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1859 iterations so that we do not issue superfluous prefetches. */
1860 if (unroll_factor != 1)
1862 tree_unroll_loop (loop, unroll_factor,
1863 single_dom_exit (loop), &desc);
1864 unrolled = true;
1867 /* Step 6: issue the prefetches. */
1868 issue_prefetches (refs, unroll_factor, ahead);
1870 fail:
1871 release_mem_refs (refs);
1872 return unrolled;
1875 /* Issue prefetch instructions for array references in loops. */
1877 unsigned int
1878 tree_ssa_prefetch_arrays (void)
1880 loop_iterator li;
1881 struct loop *loop;
1882 bool unrolled = false;
1883 int todo_flags = 0;
1885 if (!HAVE_prefetch
1886 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1887 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1888 of processor costs and i486 does not have prefetch, but
1889 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1890 || PREFETCH_BLOCK == 0)
1891 return 0;
1893 if (dump_file && (dump_flags & TDF_DETAILS))
1895 fprintf (dump_file, "Prefetching parameters:\n");
1896 fprintf (dump_file, " simultaneous prefetches: %d\n",
1897 SIMULTANEOUS_PREFETCHES);
1898 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1899 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1900 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1901 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1902 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1903 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1904 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1905 MIN_INSN_TO_PREFETCH_RATIO);
1906 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1907 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1908 fprintf (dump_file, "\n");
1911 initialize_original_copy_tables ();
1913 if (!built_in_decls[BUILT_IN_PREFETCH])
1915 tree type = build_function_type_list (void_type_node,
1916 const_ptr_type_node, NULL_TREE);
1917 tree decl = add_builtin_function ("__builtin_prefetch", type,
1918 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1919 NULL, NULL_TREE);
1920 DECL_IS_NOVOPS (decl) = true;
1921 built_in_decls[BUILT_IN_PREFETCH] = decl;
1924 /* We assume that the size of a cache line is a power of two, so verify this
1925 here. */
1926 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1928 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1930 if (dump_file && (dump_flags & TDF_DETAILS))
1931 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1933 unrolled |= loop_prefetch_arrays (loop);
1935 if (dump_file && (dump_flags & TDF_DETAILS))
1936 fprintf (dump_file, "\n\n");
1939 if (unrolled)
1941 scev_reset ();
1942 todo_flags |= TODO_cleanup_cfg;
1945 free_original_copy_tables ();
1946 return todo_flags;