gcc/tree-ssa-loop-prefetch.c
1 /* Array prefetching.
2 Copyright (C) 2005-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "tree-pass.h"
30 #include "gimple-ssa.h"
31 #include "optabs-query.h"
32 #include "tree-pretty-print.h"
33 #include "fold-const.h"
34 #include "stor-layout.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "gimplify-me.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-manip.h"
40 #include "tree-ssa-loop-niter.h"
41 #include "tree-ssa-loop.h"
42 #include "tree-into-ssa.h"
43 #include "cfgloop.h"
44 #include "tree-scalar-evolution.h"
45 #include "params.h"
46 #include "langhooks.h"
47 #include "tree-inline.h"
48 #include "tree-data-ref.h"
51 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
52 between the GIMPLE and RTL worlds. */
54 /* This pass inserts prefetch instructions to optimize cache usage during
55 accesses to arrays in loops. It processes loops sequentially and:
57 1) Gathers all memory references in the single loop.
58 2) For each of the references it decides when it is profitable to prefetch
59      it.  To do this, we evaluate the reuse among the accesses and determine
60 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
61 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
62 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
63 iterations of the loop that are zero modulo PREFETCH_MOD). For example
64 (assuming cache line size is 64 bytes, char has size 1 byte and there
65 is no hardware sequential prefetch):
67 char *a;
68 for (i = 0; i < max; i++)
70 a[255] = ...; (0)
71 a[i] = ...; (1)
72 a[i + 64] = ...; (2)
73 a[16*i] = ...; (3)
74 a[187*i] = ...; (4)
75 a[187*i + 50] = ...; (5)
78 (0) obviously has PREFETCH_BEFORE 1
79 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
80 location 64 iterations before it, and PREFETCH_MOD 64 (since
81 it hits the same cache line otherwise).
82 (2) has PREFETCH_MOD 64
83 (3) has PREFETCH_MOD 4
84 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
85 the cache line accessed by (5) is the same with probability only
86 7/32.
87 (5) has PREFETCH_MOD 1 as well.
89 Additionally, we use data dependence analysis to determine for each
90 reference the distance till the first reuse; this information is used
91 to determine the temporality of the issued prefetch instruction.
93 3) We determine how much ahead we need to prefetch. The number of
94 iterations needed is time to fetch / time spent in one iteration of
95 the loop. The problem is that we do not know either of these values,
96 so we just make a heuristic guess based on a magic (possibly)
97      target-specific constant and the size of the loop.
99 4) Determine which of the references we prefetch. We take into account
100 that there is a maximum number of simultaneous prefetches (provided
101      by the machine description).  We issue as many prefetches as possible
102 while still within this bound (starting with those with lowest
103 prefetch_mod, since they are responsible for most of the cache
104 misses).
106 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
107 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
108 prefetching nonaccessed memory.
109 TODO -- actually implement peeling.
111 6) We actually emit the prefetch instructions. ??? Perhaps emit the
112 prefetch instructions with guards in cases where 5) was not sufficient
113 to satisfy the constraints?
115 A cost model is implemented to determine whether or not prefetching is
116 profitable for a given loop. The cost model has three heuristics:
118 1. Function trip_count_to_ahead_ratio_too_small_p implements a
119 heuristic that determines whether or not the loop has too few
120 iterations (compared to ahead). Prefetching is not likely to be
121 beneficial if the trip count to ahead ratio is below a certain
122 minimum.
124 2. Function mem_ref_count_reasonable_p implements a heuristic that
125 determines whether the given loop has enough CPU ops that can be
126 overlapped with cache missing memory ops. If not, the loop
127 won't benefit from prefetching. In the implementation,
128 prefetching is not considered beneficial if the ratio between
129 the instruction count and the mem ref count is below a certain
130 minimum.
132 3. Function insn_to_prefetch_ratio_too_small_p implements a
133 heuristic that disables prefetching in a loop if the prefetching
134 cost is above a certain limit. The relative prefetching cost is
135 estimated by taking the ratio between the prefetch count and the
136      total instruction count (this models the I-cache cost).
138 The limits used in these heuristics are defined as parameters with
139 reasonable default values. Machine-specific default values will be
140 added later.
142 Some other TODO:
143 -- write and use more general reuse analysis (that could be also used
144 in other cache aimed loop optimizations)
145    -- make it behave sanely together with the prefetches given by the user
146       (now we just ignore them; at the very least we should avoid
147       optimizing loops in which the user put his own prefetches)
148 -- we assume cache line size alignment of arrays; this could be
149 improved. */
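/* Editorial sketch (not part of the pass): for reference (1) from the
   example above, with PREFETCH_BLOCK 64 and an ahead distance of AHEAD
   iterations (computed in step 3), the combined effect of steps 4)-6)
   roughly corresponds to the source-level transformation

     for (i = 0; i < max; i += 64)
       {
	 __builtin_prefetch (&a[i + AHEAD], 1, 3);
	 a[i] = ...; a[i + 1] = ...; ...; a[i + 63] = ...;
       }

   i.e. the loop is unrolled PREFETCH_MOD times and a single write
   prefetch (second argument 1, temporal locality 3) is issued per
   unrolled body.  */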
151 /* Magic constants follow. These should be replaced by machine specific
152 numbers. */
154 /* True if write can be prefetched by a read prefetch. */
156 #ifndef WRITE_CAN_USE_READ_PREFETCH
157 #define WRITE_CAN_USE_READ_PREFETCH 1
158 #endif
160 /* True if read can be prefetched by a write prefetch. */
162 #ifndef READ_CAN_USE_WRITE_PREFETCH
163 #define READ_CAN_USE_WRITE_PREFETCH 0
164 #endif
166 /* The size of the block loaded by a single prefetch. Usually, this is
167 the same as cache line size (at the moment, we only consider one level
168 of cache hierarchy). */
170 #ifndef PREFETCH_BLOCK
171 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
172 #endif
174 /* Do we have forward hardware sequential prefetching?  */
176 #ifndef HAVE_FORWARD_PREFETCH
177 #define HAVE_FORWARD_PREFETCH 0
178 #endif
180 /* Do we have backward hardware sequential prefetching?  */
182 #ifndef HAVE_BACKWARD_PREFETCH
183 #define HAVE_BACKWARD_PREFETCH 0
184 #endif
186 /* In some cases we are only able to determine that there is a certain
187 probability that the two accesses hit the same cache line. In this
188 case, we issue the prefetches for both of them if this probability
189    is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
191 #ifndef ACCEPTABLE_MISS_RATE
192 #define ACCEPTABLE_MISS_RATE 50
193 #endif
195 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
196 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
198 /* We consider a memory access nontemporal if it is not reused sooner than
199 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
200 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
201    so that we use nontemporal prefetches e.g. if a single memory location
202 is accessed several times in a single iteration of the loop. */
203 #define NONTEMPORAL_FRACTION 16
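/* For instance, assuming a 32 kB L1 cache (an illustrative value; the
   real one comes from the target costs), reuses within
   32768 / NONTEMPORAL_FRACTION = 2048 bytes of accessed data never
   block a nontemporal prefetch, while a reuse distance of at least
   L2_CACHE_SIZE_BYTES makes the reference a nontemporal candidate.  */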
205 /* In case we have to emit a memory fence instruction after the loop that
206 uses nontemporal stores, this defines the builtin to use. */
208 #ifndef FENCE_FOLLOWING_MOVNT
209 #define FENCE_FOLLOWING_MOVNT NULL_TREE
210 #endif
212 /* It is not profitable to prefetch when the trip count is not at
213 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
214 For example, in a loop with a prefetch ahead distance of 10,
215 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
216    profitable to prefetch when the trip count is greater than or equal to
217 40. In that case, 30 out of the 40 iterations will benefit from
218 prefetching. */
220 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
221 #define TRIP_COUNT_TO_AHEAD_RATIO 4
222 #endif
224 /* A group of references between which reuse may occur.  */
226 struct mem_ref_group
228 tree base; /* Base of the reference. */
229 tree step; /* Step of the reference. */
230 struct mem_ref *refs; /* References in the group. */
231 struct mem_ref_group *next; /* Next group of references. */
234 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
236 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
238 /* Do not generate a prefetch if the unroll factor is significantly less
239 than what is required by the prefetch. This is to avoid redundant
240 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
241 2, prefetching requires unrolling the loop 16 times, but
242 the loop is actually unrolled twice. In this case (ratio = 8),
243 prefetching is not likely to be beneficial. */
245 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
246 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
247 #endif
249 /* Some of the prefetch computations have quadratic complexity. We want to
250 avoid huge compile times and, therefore, want to limit the amount of
251 memory references per loop where we consider prefetching. */
253 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
254 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
255 #endif
257 /* The memory reference. */
259 struct mem_ref
261  gimple *stmt;			/* Statement in which the reference appears.  */
262 tree mem; /* The reference. */
263 HOST_WIDE_INT delta; /* Constant offset of the reference. */
264 struct mem_ref_group *group; /* The group of references it belongs to. */
265 unsigned HOST_WIDE_INT prefetch_mod;
266 /* Prefetch only each PREFETCH_MOD-th
267 iteration. */
268 unsigned HOST_WIDE_INT prefetch_before;
269 /* Prefetch only first PREFETCH_BEFORE
270 iterations. */
271 unsigned reuse_distance; /* The amount of data accessed before the first
272 reuse of this value. */
273 struct mem_ref *next; /* The next reference in the group. */
274 unsigned write_p : 1; /* Is it a write? */
275  unsigned independent_p : 1;	/* True if the reference is independent of
276 all other references inside the loop. */
277 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
278 unsigned storent_p : 1; /* True if we changed the store to a
279 nontemporal one. */
282 /* Dumps information about a memory reference.  */
283 static void
284 dump_mem_details (FILE *file, tree base, tree step,
285 HOST_WIDE_INT delta, bool write_p)
287 fprintf (file, "(base ");
288 print_generic_expr (file, base, TDF_SLIM);
289 fprintf (file, ", step ");
290 if (cst_and_fits_in_hwi (step))
291 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
292 else
293 print_generic_expr (file, step, TDF_TREE);
294 fprintf (file, ")\n");
295 fprintf (file, " delta ");
296 fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
297 fprintf (file, "\n");
298 fprintf (file, " %s\n", write_p ? "write" : "read");
299 fprintf (file, "\n");
302 /* Dumps information about reference REF to FILE. */
304 static void
305 dump_mem_ref (FILE *file, struct mem_ref *ref)
307 fprintf (file, "Reference %p:\n", (void *) ref);
309 fprintf (file, " group %p ", (void *) ref->group);
311 dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
312 ref->write_p);
315 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
316 exist. */
318 static struct mem_ref_group *
319 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
321 struct mem_ref_group *group;
323 for (; *groups; groups = &(*groups)->next)
325 if (operand_equal_p ((*groups)->step, step, 0)
326 && operand_equal_p ((*groups)->base, base, 0))
327 return *groups;
329 /* If step is an integer constant, keep the list of groups sorted
330 by decreasing step. */
331 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
332 && int_cst_value ((*groups)->step) < int_cst_value (step))
333 break;
336 group = XNEW (struct mem_ref_group);
337 group->base = base;
338 group->step = step;
339 group->refs = NULL;
340 group->next = *groups;
341 *groups = group;
343 return group;
346 /* Records a memory reference MEM in GROUP with offset DELTA and write status
347 WRITE_P. The reference occurs in statement STMT. */
349 static void
350 record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
351 HOST_WIDE_INT delta, bool write_p)
353 struct mem_ref **aref;
355 /* Do not record the same address twice. */
356 for (aref = &group->refs; *aref; aref = &(*aref)->next)
358      /* It need not be possible for a write reference to reuse a read
359 prefetch, or vice versa. */
360 if (!WRITE_CAN_USE_READ_PREFETCH
361 && write_p
362 && !(*aref)->write_p)
363 continue;
364 if (!READ_CAN_USE_WRITE_PREFETCH
365 && !write_p
366 && (*aref)->write_p)
367 continue;
369 if ((*aref)->delta == delta)
370 return;
373 (*aref) = XNEW (struct mem_ref);
374 (*aref)->stmt = stmt;
375 (*aref)->mem = mem;
376 (*aref)->delta = delta;
377 (*aref)->write_p = write_p;
378 (*aref)->prefetch_before = PREFETCH_ALL;
379 (*aref)->prefetch_mod = 1;
380 (*aref)->reuse_distance = 0;
381 (*aref)->issue_prefetch_p = false;
382 (*aref)->group = group;
383 (*aref)->next = NULL;
384 (*aref)->independent_p = false;
385 (*aref)->storent_p = false;
387 if (dump_file && (dump_flags & TDF_DETAILS))
388 dump_mem_ref (dump_file, *aref);
391 /* Release memory references in GROUPS. */
393 static void
394 release_mem_refs (struct mem_ref_group *groups)
396 struct mem_ref_group *next_g;
397 struct mem_ref *ref, *next_r;
399 for (; groups; groups = next_g)
401 next_g = groups->next;
402 for (ref = groups->refs; ref; ref = next_r)
404 next_r = ref->next;
405 free (ref);
407 free (groups);
411 /* A structure used to pass arguments to idx_analyze_ref. */
413 struct ar_data
415 struct loop *loop; /* Loop of the reference. */
416 gimple *stmt; /* Statement of the reference. */
417 tree *step; /* Step of the memory reference. */
418 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
421 /* Analyzes a single INDEX of a memory reference to obtain information
422 described at analyze_ref. Callback for for_each_index. */
424 static bool
425 idx_analyze_ref (tree base, tree *index, void *data)
427 struct ar_data *ar_data = (struct ar_data *) data;
428 tree ibase, step, stepsize;
429 HOST_WIDE_INT idelta = 0, imult = 1;
430 affine_iv iv;
432 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
433 *index, &iv, true))
434 return false;
435 ibase = iv.base;
436 step = iv.step;
438 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
439 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
441 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
442 ibase = TREE_OPERAND (ibase, 0);
444 if (cst_and_fits_in_hwi (ibase))
446 idelta += int_cst_value (ibase);
447 ibase = build_int_cst (TREE_TYPE (ibase), 0);
450 if (TREE_CODE (base) == ARRAY_REF)
452 stepsize = array_ref_element_size (base);
453 if (!cst_and_fits_in_hwi (stepsize))
454 return false;
455 imult = int_cst_value (stepsize);
456 step = fold_build2 (MULT_EXPR, sizetype,
457 fold_convert (sizetype, step),
458 fold_convert (sizetype, stepsize));
459 idelta *= imult;
462 if (*ar_data->step == NULL_TREE)
463 *ar_data->step = step;
464 else
465 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
466 fold_convert (sizetype, *ar_data->step),
467 fold_convert (sizetype, step));
468 *ar_data->delta += idelta;
469 *index = ibase;
471 return true;
474 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
475    STEP are integer constants and iter is the number of iterations of LOOP.  The
476 reference occurs in statement STMT. Strips nonaddressable component
477 references from REF_P. */
479 static bool
480 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
481 tree *step, HOST_WIDE_INT *delta,
482 gimple *stmt)
484 struct ar_data ar_data;
485 tree off;
486 HOST_WIDE_INT bit_offset;
487 tree ref = *ref_p;
489 *step = NULL_TREE;
490 *delta = 0;
492 /* First strip off the component references. Ignore bitfields.
493      Also strip off the real and imaginary parts of a complex, so that
494 they can have the same base. */
495 if (TREE_CODE (ref) == REALPART_EXPR
496 || TREE_CODE (ref) == IMAGPART_EXPR
497 || (TREE_CODE (ref) == COMPONENT_REF
498 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
500 if (TREE_CODE (ref) == IMAGPART_EXPR)
501 *delta += int_size_in_bytes (TREE_TYPE (ref));
502 ref = TREE_OPERAND (ref, 0);
505 *ref_p = ref;
507 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
509 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
510 bit_offset = TREE_INT_CST_LOW (off);
511 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
513 *delta += bit_offset / BITS_PER_UNIT;
516 *base = unshare_expr (ref);
517 ar_data.loop = loop;
518 ar_data.stmt = stmt;
519 ar_data.step = step;
520 ar_data.delta = delta;
521 return for_each_index (base, idx_analyze_ref, &ar_data);
524 /* Record a memory reference REF to the list REFS. The reference occurs in
525    LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
526 reference was recorded, false otherwise. */
528 static bool
529 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
530 tree ref, bool write_p, gimple *stmt)
532 tree base, step;
533 HOST_WIDE_INT delta;
534 struct mem_ref_group *agrp;
536 if (get_base_address (ref) == NULL)
537 return false;
539 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
540 return false;
541 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
542 if (step == NULL_TREE)
543 return false;
545 /* Stop if the address of BASE could not be taken. */
546 if (may_be_nonaddressable_p (base))
547 return false;
549 /* Limit non-constant step prefetching only to the innermost loops and
550 only when the step is loop invariant in the entire loop nest. */
551 if (!cst_and_fits_in_hwi (step))
553 if (loop->inner != NULL)
555 if (dump_file && (dump_flags & TDF_DETAILS))
557	      fprintf (dump_file, "Memory expression %p\n", (void *) ref);
558 print_generic_expr (dump_file, ref, TDF_TREE);
559 fprintf (dump_file,":");
560 dump_mem_details (dump_file, base, step, delta, write_p);
561 fprintf (dump_file,
562 "Ignoring %p, non-constant step prefetching is "
563 "limited to inner most loops \n",
564 (void *) ref);
566 return false;
568 else
570 if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
572 if (dump_file && (dump_flags & TDF_DETAILS))
574	      fprintf (dump_file, "Memory expression %p\n", (void *) ref);
575 print_generic_expr (dump_file, ref, TDF_TREE);
576 fprintf (dump_file,":");
577 dump_mem_details (dump_file, base, step, delta, write_p);
578 fprintf (dump_file,
579 "Not prefetching, ignoring %p due to "
580 "loop variant step\n",
581 (void *) ref);
583 return false;
588 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
589 are integer constants. */
590 agrp = find_or_create_group (refs, base, step);
591 record_ref (agrp, stmt, ref, delta, write_p);
593 return true;
596 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
597 true if there are no other memory references inside the loop. */
599 static struct mem_ref_group *
600 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
602 basic_block *body = get_loop_body_in_dom_order (loop);
603 basic_block bb;
604 unsigned i;
605 gimple_stmt_iterator bsi;
606 gimple *stmt;
607 tree lhs, rhs;
608 struct mem_ref_group *refs = NULL;
610 *no_other_refs = true;
611 *ref_count = 0;
613  /* Scan the loop body in order, so that earlier references precede the
614 later ones. */
615 for (i = 0; i < loop->num_nodes; i++)
617 bb = body[i];
618 if (bb->loop_father != loop)
619 continue;
621 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
623 stmt = gsi_stmt (bsi);
625 if (gimple_code (stmt) != GIMPLE_ASSIGN)
627 if (gimple_vuse (stmt)
628 || (is_gimple_call (stmt)
629 && !(gimple_call_flags (stmt) & ECF_CONST)))
630 *no_other_refs = false;
631 continue;
634 lhs = gimple_assign_lhs (stmt);
635 rhs = gimple_assign_rhs1 (stmt);
637 if (REFERENCE_CLASS_P (rhs))
639 *no_other_refs &= gather_memory_references_ref (loop, &refs,
640 rhs, false, stmt);
641 *ref_count += 1;
643 if (REFERENCE_CLASS_P (lhs))
645 *no_other_refs &= gather_memory_references_ref (loop, &refs,
646 lhs, true, stmt);
647 *ref_count += 1;
651 free (body);
653 return refs;
656 /* Prune the prefetch candidate REF using self-reuse.  */
658 static void
659 prune_ref_by_self_reuse (struct mem_ref *ref)
661 HOST_WIDE_INT step;
662 bool backward;
664  /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
665 if (!cst_and_fits_in_hwi (ref->group->step))
666 return;
668 step = int_cst_value (ref->group->step);
670 backward = step < 0;
672 if (step == 0)
674      /* Prefetch references to an invariant address just once.  */
675 ref->prefetch_before = 1;
676 return;
679 if (backward)
680 step = -step;
682 if (step > PREFETCH_BLOCK)
683 return;
685 if ((backward && HAVE_BACKWARD_PREFETCH)
686 || (!backward && HAVE_FORWARD_PREFETCH))
688 ref->prefetch_before = 1;
689 return;
692 ref->prefetch_mod = PREFETCH_BLOCK / step;
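/* Example: for reference (3) from the introductory comment (a[16*i]
   with 1-byte elements), step = 16 and PREFETCH_BLOCK = 64 yield
   prefetch_mod = 64 / 16 = 4: a new cache line is entered only every
   fourth iteration, so prefetching in the remaining iterations would
   be redundant.  */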
695 /* Divides X by BY, rounding down. */
697 static HOST_WIDE_INT
698 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
700 gcc_assert (by > 0);
702 if (x >= 0)
703 return x / by;
704 else
705 return (x + by - 1) / by;
708 /* Given a CACHE_LINE_SIZE and two inductive memory references
709 with a common STEP greater than CACHE_LINE_SIZE and an address
710 difference DELTA, compute the probability that they will fall
711 in different cache lines. Return true if the computed miss rate
712 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
713 number of distinct iterations after which the pattern repeats itself.
714 ALIGN_UNIT is the unit of alignment in bytes. */
716 static bool
717 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
718 HOST_WIDE_INT step, HOST_WIDE_INT delta,
719 unsigned HOST_WIDE_INT distinct_iters,
720 int align_unit)
722 unsigned align, iter;
723 int total_positions, miss_positions, max_allowed_miss_positions;
724 int address1, address2, cache_line1, cache_line2;
726 /* It always misses if delta is greater than or equal to the cache
727 line size. */
728 if (delta >= (HOST_WIDE_INT) cache_line_size)
729 return false;
731 miss_positions = 0;
732 total_positions = (cache_line_size / align_unit) * distinct_iters;
733 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
735 /* Iterate through all possible alignments of the first
736 memory reference within its cache line. */
737 for (align = 0; align < cache_line_size; align += align_unit)
739 /* Iterate through all distinct iterations. */
740 for (iter = 0; iter < distinct_iters; iter++)
742 address1 = align + step * iter;
743 address2 = address1 + delta;
744 cache_line1 = address1 / cache_line_size;
745 cache_line2 = address2 / cache_line_size;
746 if (cache_line1 != cache_line2)
748 miss_positions += 1;
749 if (miss_positions > max_allowed_miss_positions)
750 return false;
753 return true;
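/* Worked example: for references (4) and (5) from the introductory
   comment (step 187, delta 50, cache line 64), the two accesses share
   a cache line only if the first lands within the first 64 - 50 = 14
   bytes of its line, i.e. with probability 14/64 = 7/32.  The resulting
   miss rate of 25/32 (roughly 781 per thousand) is far above
   ACCEPTABLE_MISS_RATE, so this function returns false and both
   references keep their own prefetches.  */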
756 /* Prune the prefetch candidate REF using the reuse with BY.
757 If BY_IS_BEFORE is true, BY is before REF in the loop. */
759 static void
760 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
761 bool by_is_before)
763 HOST_WIDE_INT step;
764 bool backward;
765 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
766 HOST_WIDE_INT delta = delta_b - delta_r;
767 HOST_WIDE_INT hit_from;
768 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
769 HOST_WIDE_INT reduced_step;
770 unsigned HOST_WIDE_INT reduced_prefetch_block;
771 tree ref_type;
772 int align_unit;
774  /* If the step is non-constant, we cannot calculate prefetch_before.  */
775  if (!cst_and_fits_in_hwi (ref->group->step))
776 return;
779 step = int_cst_value (ref->group->step);
781 backward = step < 0;
784 if (delta == 0)
786      /* If the references have the same address, only prefetch the
787 former. */
788 if (by_is_before)
789 ref->prefetch_before = 0;
791 return;
794 if (!step)
796 /* If the reference addresses are invariant and fall into the
797 same cache line, prefetch just the first one. */
798 if (!by_is_before)
799 return;
801 if (ddown (ref->delta, PREFETCH_BLOCK)
802 != ddown (by->delta, PREFETCH_BLOCK))
803 return;
805 ref->prefetch_before = 0;
806 return;
809 /* Only prune the reference that is behind in the array. */
810 if (backward)
812 if (delta > 0)
813 return;
815 /* Transform the data so that we may assume that the accesses
816 are forward. */
817 delta = - delta;
818 step = -step;
819 delta_r = PREFETCH_BLOCK - 1 - delta_r;
820 delta_b = PREFETCH_BLOCK - 1 - delta_b;
822 else
824 if (delta < 0)
825 return;
828 /* Check whether the two references are likely to hit the same cache
829     line, and how distant from each other the iterations in which
830     this occurs are.  */
832 if (step <= PREFETCH_BLOCK)
834 /* The accesses are sure to meet. Let us check when. */
835 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
836 prefetch_before = (hit_from - delta_r + step - 1) / step;
838 /* Do not reduce prefetch_before if we meet beyond cache size. */
839 if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
840 prefetch_before = PREFETCH_ALL;
841 if (prefetch_before < ref->prefetch_before)
842 ref->prefetch_before = prefetch_before;
844 return;
847 /* A more complicated case with step > prefetch_block. First reduce
848 the ratio between the step and the cache line size to its simplest
849 terms. The resulting denominator will then represent the number of
850 distinct iterations after which each address will go back to its
851 initial location within the cache line. This computation assumes
852 that PREFETCH_BLOCK is a power of two. */
853 prefetch_block = PREFETCH_BLOCK;
854 reduced_prefetch_block = prefetch_block;
855 reduced_step = step;
856 while ((reduced_step & 1) == 0
857 && reduced_prefetch_block > 1)
859 reduced_step >>= 1;
860 reduced_prefetch_block >>= 1;
863 prefetch_before = delta / step;
864 delta %= step;
865 ref_type = TREE_TYPE (ref->mem);
866 align_unit = TYPE_ALIGN (ref_type) / 8;
867 if (is_miss_rate_acceptable (prefetch_block, step, delta,
868 reduced_prefetch_block, align_unit))
870 /* Do not reduce prefetch_before if we meet beyond cache size. */
871 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
872 prefetch_before = PREFETCH_ALL;
873 if (prefetch_before < ref->prefetch_before)
874 ref->prefetch_before = prefetch_before;
876 return;
879 /* Try also the following iteration. */
880 prefetch_before++;
881 delta = step - delta;
882 if (is_miss_rate_acceptable (prefetch_block, step, delta,
883 reduced_prefetch_block, align_unit))
885 if (prefetch_before < ref->prefetch_before)
886 ref->prefetch_before = prefetch_before;
888 return;
891  /* REF probably does not reuse BY.  */
892 return;
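/* Illustration of the step > PREFETCH_BLOCK reduction above: for
   step = 80 and PREFETCH_BLOCK = 64, both are divided by 16, giving
   reduced_step = 5 and reduced_prefetch_block = 4.  Each address thus
   returns to its initial position within the cache line after 4
   distinct iterations, and that value is passed to
   is_miss_rate_acceptable as DISTINCT_ITERS.  */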
895 /* Prune the prefetch candidate REF using the reuses with other references
896 in REFS. */
898 static void
899 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
901 struct mem_ref *prune_by;
902 bool before = true;
904 prune_ref_by_self_reuse (ref);
906 for (prune_by = refs; prune_by; prune_by = prune_by->next)
908 if (prune_by == ref)
910 before = false;
911 continue;
914 if (!WRITE_CAN_USE_READ_PREFETCH
915 && ref->write_p
916 && !prune_by->write_p)
917 continue;
918 if (!READ_CAN_USE_WRITE_PREFETCH
919 && !ref->write_p
920 && prune_by->write_p)
921 continue;
923 prune_ref_by_group_reuse (ref, prune_by, before);
927 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
929 static void
930 prune_group_by_reuse (struct mem_ref_group *group)
932 struct mem_ref *ref_pruned;
934 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
936 prune_ref_by_reuse (ref_pruned, group->refs);
938 if (dump_file && (dump_flags & TDF_DETAILS))
940 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
942 if (ref_pruned->prefetch_before == PREFETCH_ALL
943 && ref_pruned->prefetch_mod == 1)
944 fprintf (dump_file, " no restrictions");
945 else if (ref_pruned->prefetch_before == 0)
946 fprintf (dump_file, " do not prefetch");
947 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
948 fprintf (dump_file, " prefetch once");
949 else
951 if (ref_pruned->prefetch_before != PREFETCH_ALL)
953 fprintf (dump_file, " prefetch before ");
954 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
955 ref_pruned->prefetch_before);
957 if (ref_pruned->prefetch_mod != 1)
959 fprintf (dump_file, " prefetch mod ");
960 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
961 ref_pruned->prefetch_mod);
964 fprintf (dump_file, "\n");
969 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
971 static void
972 prune_by_reuse (struct mem_ref_group *groups)
974 for (; groups; groups = groups->next)
975 prune_group_by_reuse (groups);
978 /* Returns true if we should issue prefetch for REF. */
980 static bool
981 should_issue_prefetch_p (struct mem_ref *ref)
983  /* For now, do not issue prefetches that are useful for only the first
984     few iterations.  */
985 if (ref->prefetch_before != PREFETCH_ALL)
987 if (dump_file && (dump_flags & TDF_DETAILS))
988 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
989 (void *) ref);
990 return false;
993 /* Do not prefetch nontemporal stores. */
994 if (ref->storent_p)
996 if (dump_file && (dump_flags & TDF_DETAILS))
997 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
998 return false;
1001 return true;
1004 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1005 AHEAD is the number of iterations to prefetch ahead (which corresponds
1006 to the number of simultaneous instances of one prefetch running at a
1007    time).  UNROLL_FACTOR is the factor by which the loop is going to be
1008 unrolled. Returns true if there is anything to prefetch. */
1010 static bool
1011 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1012 unsigned ahead)
1014 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1015 unsigned slots_per_prefetch;
1016 struct mem_ref *ref;
1017 bool any = false;
1019 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
1020 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
1022 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1023 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
1024 it will need a prefetch slot. */
1025 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
1026 if (dump_file && (dump_flags & TDF_DETAILS))
1027 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1028 slots_per_prefetch);
1030 /* For now we just take memory references one by one and issue
1031 prefetches for as many as possible. The groups are sorted
1032 starting with the largest step, since the references with
1033 large step are more likely to cause many cache misses. */
1035 for (; groups; groups = groups->next)
1036 for (ref = groups->refs; ref; ref = ref->next)
1038 if (!should_issue_prefetch_p (ref))
1039 continue;
1041 /* The loop is far from being sufficiently unrolled for this
1042	 prefetch.  Do not generate the prefetch to avoid many redundant
1043 prefetches. */
1044 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1045 continue;
1047 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1048 and we unroll the loop UNROLL_FACTOR times, we need to insert
1049 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1050 iteration. */
1051 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1052 / ref->prefetch_mod);
1053 prefetch_slots = n_prefetches * slots_per_prefetch;
1055 /* If more than half of the prefetches would be lost anyway, do not
1056 issue the prefetch. */
1057 if (2 * remaining_prefetch_slots < prefetch_slots)
1058 continue;
1060 ref->issue_prefetch_p = true;
1062 if (remaining_prefetch_slots <= prefetch_slots)
1063 return true;
1064 remaining_prefetch_slots -= prefetch_slots;
1065 any = true;
1068 return any;
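/* Scheduling example (with assumed numbers): for AHEAD = 8 and
   UNROLL_FACTOR = 4, each prefetch occupies (8 + 4/2) / 4 = 2 slots.
   A reference with prefetch_mod = 2 then needs ceil (4 / 2) = 2
   prefetch instructions per unrolled body, i.e. 4 of the available
   SIMULTANEOUS_PREFETCHES slots.  */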
1071 /* Return TRUE if no prefetch is going to be generated in the given
1072 GROUPS. */
1074 static bool
1075 nothing_to_prefetch_p (struct mem_ref_group *groups)
1077 struct mem_ref *ref;
1079 for (; groups; groups = groups->next)
1080 for (ref = groups->refs; ref; ref = ref->next)
1081 if (should_issue_prefetch_p (ref))
1082 return false;
1084 return true;
1087 /* Estimate the number of prefetches in the given GROUPS.
1088 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1090 static int
1091 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1093 struct mem_ref *ref;
1094 unsigned n_prefetches;
1095 int prefetch_count = 0;
1097 for (; groups; groups = groups->next)
1098 for (ref = groups->refs; ref; ref = ref->next)
1099 if (should_issue_prefetch_p (ref))
1101 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1102 / ref->prefetch_mod);
1103 prefetch_count += n_prefetches;
1106 return prefetch_count;
1109 /* Issue prefetches for the reference REF into loop as decided before.
1110    AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
1111 is the factor by which LOOP was unrolled. */
1113 static void
1114 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1116 HOST_WIDE_INT delta;
1117 tree addr, addr_base, write_p, local, forward;
1118 gcall *prefetch;
1119 gimple_stmt_iterator bsi;
1120 unsigned n_prefetches, ap;
1121 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1123 if (dump_file && (dump_flags & TDF_DETAILS))
1124 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1125 nontemporal ? " nontemporal" : "",
1126 (void *) ref);
1128 bsi = gsi_for_stmt (ref->stmt);
1130 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1131 / ref->prefetch_mod);
1132 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1133 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1134 true, NULL, true, GSI_SAME_STMT);
1135 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1136 local = nontemporal ? integer_zero_node : integer_three_node;
1138 for (ap = 0; ap < n_prefetches; ap++)
1140 if (cst_and_fits_in_hwi (ref->group->step))
1142 /* Determine the address to prefetch. */
1143 delta = (ahead + ap * ref->prefetch_mod) *
1144 int_cst_value (ref->group->step);
1145 addr = fold_build_pointer_plus_hwi (addr_base, delta);
1146 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1147 true, GSI_SAME_STMT);
1149 else
1151 /* The step size is non-constant but loop-invariant. We use the
1152	     heuristic of simply prefetching AHEAD iterations ahead.  */
1153 forward = fold_build2 (MULT_EXPR, sizetype,
1154 fold_convert (sizetype, ref->group->step),
1155 fold_convert (sizetype, size_int (ahead)));
1156 addr = fold_build_pointer_plus (addr_base, forward);
1157 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1158 NULL, true, GSI_SAME_STMT);
1160 /* Create the prefetch instruction. */
1161 prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
1162 3, addr, write_p, local);
1163 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
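/* For a constant step S, the calls built above correspond, in source
   form, to

     __builtin_prefetch (addr_base + (AHEAD + 0 * PREFETCH_MOD) * S, ...);
     __builtin_prefetch (addr_base + (AHEAD + 1 * PREFETCH_MOD) * S, ...);

   one call per AP in [0, N_PREFETCHES), each inserted immediately
   before the statement containing the reference.  */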
1167 /* Issue prefetches for the references in GROUPS into loop as decided before.
1168    AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
1169    factor by which LOOP was unrolled.  */
1171 static void
1172 issue_prefetches (struct mem_ref_group *groups,
1173 unsigned unroll_factor, unsigned ahead)
1175 struct mem_ref *ref;
1177 for (; groups; groups = groups->next)
1178 for (ref = groups->refs; ref; ref = ref->next)
1179 if (ref->issue_prefetch_p)
1180 issue_prefetch_ref (ref, unroll_factor, ahead);
1183 /* Returns true if REF is a memory write for which a nontemporal store insn
1184 can be used. */
1186 static bool
1187 nontemporal_store_p (struct mem_ref *ref)
1189 machine_mode mode;
1190 enum insn_code code;
1192 /* REF must be a write that is not reused. We require it to be independent
1193     of all other memory references in the loop, as the nontemporal stores may
1194 be reordered with respect to other memory references. */
1195 if (!ref->write_p
1196 || !ref->independent_p
1197 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1198 return false;
1200 /* Check that we have the storent instruction for the mode. */
1201 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1202 if (mode == BLKmode)
1203 return false;
1205 code = optab_handler (storent_optab, mode);
1206 return code != CODE_FOR_nothing;
1209 /* If REF is a nontemporal store, we mark the corresponding modify statement
1210 and return true. Otherwise, we return false. */
1212 static bool
1213 mark_nontemporal_store (struct mem_ref *ref)
1215 if (!nontemporal_store_p (ref))
1216 return false;
1218 if (dump_file && (dump_flags & TDF_DETAILS))
1219 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1220 (void *) ref);
1222 gimple_assign_set_nontemporal_move (ref->stmt, true);
1223 ref->storent_p = true;
1225 return true;
1228 /* Issue a memory fence instruction after LOOP. */
1230 static void
1231 emit_mfence_after_loop (struct loop *loop)
1233 vec<edge> exits = get_loop_exit_edges (loop);
1234 edge exit;
1235 gcall *call;
1236 gimple_stmt_iterator bsi;
1237 unsigned i;
1239 FOR_EACH_VEC_ELT (exits, i, exit)
1241 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1243 if (!single_pred_p (exit->dest)
1244 /* If possible, we prefer not to insert the fence on other paths
1245	     in the CFG.  */
1246 && !(exit->flags & EDGE_ABNORMAL))
1247 split_loop_exit_edge (exit);
1248 bsi = gsi_after_labels (exit->dest);
1250 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1253 exits.release ();
1254 update_ssa (TODO_update_ssa_only_virtuals);
1257 /* Returns true if we can use storent in loop, false otherwise. */
1259 static bool
1260 may_use_storent_in_loop_p (struct loop *loop)
1262 bool ret = true;
1264 if (loop->inner != NULL)
1265 return false;
1267  /* If we must issue an mfence insn after using storent, check that there
1268 is a suitable place for it at each of the loop exits. */
1269 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1271 vec<edge> exits = get_loop_exit_edges (loop);
1272 unsigned i;
1273 edge exit;
1275 FOR_EACH_VEC_ELT (exits, i, exit)
1276 if ((exit->flags & EDGE_ABNORMAL)
1277 && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1278 ret = false;
1280 exits.release ();
1283 return ret;
1286 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1287 references in the loop. */
1289 static void
1290 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1292 struct mem_ref *ref;
1293 bool any = false;
1295 if (!may_use_storent_in_loop_p (loop))
1296 return;
1298 for (; groups; groups = groups->next)
1299 for (ref = groups->refs; ref; ref = ref->next)
1300 any |= mark_nontemporal_store (ref);
1302 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1303 emit_mfence_after_loop (loop);
1306 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1307    this is the case, fills in DESC with the description of the number of
1308 iterations. */
1310 static bool
1311 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1312 unsigned factor)
1314 if (!can_unroll_loop_p (loop, factor, desc))
1315 return false;
1317 /* We only consider loops without control flow for unrolling. This is not
1318 a hard restriction -- tree_unroll_loop works with arbitrary loops
1319 as well; but the unrolling/prefetching is usually more profitable for
1320 loops consisting of a single basic block, and we want to limit the
1321 code growth. */
1322 if (loop->num_nodes > 2)
1323 return false;
1325 return true;
1328 /* Determine the factor by which to unroll LOOP, from the information
1329    contained in the list of memory references REFS.  A description of the
1330    number of iterations of LOOP is stored to DESC.  NINSNS is the number of
1331 insns of the LOOP. EST_NITER is the estimated number of iterations of
1332 the loop, or -1 if no estimate is available. */
1334 static unsigned
1335 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1336 unsigned ninsns, struct tree_niter_desc *desc,
1337 HOST_WIDE_INT est_niter)
1339 unsigned upper_bound;
1340 unsigned nfactor, factor, mod_constraint;
1341 struct mem_ref_group *agp;
1342 struct mem_ref *ref;
1344 /* First check whether the loop is not too large to unroll. We ignore
1345 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1346 from unrolling them enough to make exactly one cache line covered by each
1347 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1348 us from unrolling the loops too many times in cases where we only expect
1349 gains from better scheduling and decreasing loop overhead, which is not
1350 the case here. */
1351 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1353 /* If we unrolled the loop more times than it iterates, the unrolled version
1354     of the loop would never be entered.  */
1355 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1356 upper_bound = est_niter;
1358 if (upper_bound <= 1)
1359 return 1;
1361  /* Choose the factor so that we may prefetch each cache line just once,
1362 but bound the unrolling by UPPER_BOUND. */
1363 factor = 1;
1364 for (agp = refs; agp; agp = agp->next)
1365 for (ref = agp->refs; ref; ref = ref->next)
1366 if (should_issue_prefetch_p (ref))
1368 mod_constraint = ref->prefetch_mod;
1369 nfactor = least_common_multiple (mod_constraint, factor);
1370 if (nfactor <= upper_bound)
1371 factor = nfactor;
1374 if (!should_unroll_loop_p (loop, desc, factor))
1375 return 1;
1377 return factor;
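/* Example: for two references with prefetch_mod 2 and 8, FACTOR grows
   as lcm (1, 2) = 2 and then lcm (2, 8) = 8, provided 8 does not
   exceed UPPER_BOUND; the loop is then unrolled 8 times so that each
   reference can be prefetched exactly once per cache line.  */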
1380 /* Returns the total volume of the memory references REFS, taking into account
1381 reuses in the innermost loop and cache line size. TODO -- we should also
1382 take into account reuses across the iterations of the loops in the loop
1383 nest. */
1385 static unsigned
1386 volume_of_references (struct mem_ref_group *refs)
1388 unsigned volume = 0;
1389 struct mem_ref_group *gr;
1390 struct mem_ref *ref;
1392 for (gr = refs; gr; gr = gr->next)
1393 for (ref = gr->refs; ref; ref = ref->next)
1395 /* Almost always reuses another value? */
1396 if (ref->prefetch_before != PREFETCH_ALL)
1397 continue;
1399 /* If several iterations access the same cache line, use the size of
1400 the line divided by this number. Otherwise, a cache line is
1401 accessed in each iteration. TODO -- in the latter case, we should
1402	 take the size of the reference into account, rounding it up to a
1403	 multiple of the cache line size.  */
1404 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1406 return volume;
1409 /* Returns the volume of memory references accessed across VEC iterations of
1410 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1411 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1413 static unsigned
1414 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1416 unsigned i;
1418 for (i = 0; i < n; i++)
1419 if (vec[i] != 0)
1420 break;
1422 if (i == n)
1423 return 0;
1425 gcc_assert (vec[i] > 0);
1427 /* We ignore the parts of the distance vector in subloops, since usually
1428 the numbers of iterations are much smaller. */
1429 return loop_sizes[i] * vec[i];
1432 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1433 at the position corresponding to the loop of the step. N is the depth
1434    of the considered loop nest, and LOOP is its innermost loop.  */
1436 static void
1437 add_subscript_strides (tree access_fn, unsigned stride,
1438 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1440 struct loop *aloop;
1441 tree step;
1442 HOST_WIDE_INT astep;
1443 unsigned min_depth = loop_depth (loop) - n;
1445 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1447 aloop = get_chrec_loop (access_fn);
1448 step = CHREC_RIGHT (access_fn);
1449 access_fn = CHREC_LEFT (access_fn);
1451 if ((unsigned) loop_depth (aloop) <= min_depth)
1452 continue;
1454 if (tree_fits_shwi_p (step))
1455 astep = tree_to_shwi (step);
1456 else
1457 astep = L1_CACHE_LINE_SIZE;
1459 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1464 /* Returns the volume of memory references accessed between two consecutive
1465 self-reuses of the reference DR. We consider the subscripts of DR in N
1466 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1467 loops. LOOP is the innermost loop of the current loop nest. */
1469 static unsigned
1470 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1471 struct loop *loop)
1473 tree stride, access_fn;
1474 HOST_WIDE_INT *strides, astride;
1475 vec<tree> access_fns;
1476 tree ref = DR_REF (dr);
1477 unsigned i, ret = ~0u;
1479 /* In the following example:
1481 for (i = 0; i < N; i++)
1482 for (j = 0; j < N; j++)
1483 use (a[j][i]);
1484 the same cache line is accessed each N steps (except if the change from
1485 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1486 we cannot rely purely on the results of the data dependence analysis.
1488 Instead, we compute the stride of the reference in each loop, and consider
1489     the innermost loop in which the stride is less than the cache size.  */
1491 strides = XCNEWVEC (HOST_WIDE_INT, n);
1492 access_fns = DR_ACCESS_FNS (dr);
1494 FOR_EACH_VEC_ELT (access_fns, i, access_fn)
1496 /* Keep track of the reference corresponding to the subscript, so that we
1497 know its stride. */
1498 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1499 ref = TREE_OPERAND (ref, 0);
1501 if (TREE_CODE (ref) == ARRAY_REF)
1503 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1504 if (tree_fits_uhwi_p (stride))
1505 astride = tree_to_uhwi (stride);
1506 else
1507 astride = L1_CACHE_LINE_SIZE;
1509 ref = TREE_OPERAND (ref, 0);
1511 else
1512 astride = 1;
1514 add_subscript_strides (access_fn, astride, strides, n, loop);
1517 for (i = n; i-- > 0; )
1519 unsigned HOST_WIDE_INT s;
1521 s = strides[i] < 0 ? -strides[i] : strides[i];
1523 if (s < (unsigned) L1_CACHE_LINE_SIZE
1524 && (loop_sizes[i]
1525 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1527 ret = loop_sizes[i];
1528 break;
1532 free (strides);
1533 return ret;
1536 /* Determines the distance till the first reuse of each reference in REFS
1537 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1538 memory references in the loop. Return false if the analysis fails. */
1540 static bool
1541 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1542 bool no_other_refs)
1544 struct loop *nest, *aloop;
1545 vec<data_reference_p> datarefs = vNULL;
1546 vec<ddr_p> dependences = vNULL;
1547 struct mem_ref_group *gr;
1548 struct mem_ref *ref, *refb;
1549 vec<loop_p> vloops = vNULL;
1550 unsigned *loop_data_size;
1551 unsigned i, j, n;
1552 unsigned volume, dist, adist;
1553 HOST_WIDE_INT vol;
1554 data_reference_p dr;
1555 ddr_p dep;
1557 if (loop->inner)
1558 return true;
1560 /* Find the outermost loop of the loop nest of loop (we require that
1561 there are no sibling loops inside the nest). */
1562 nest = loop;
1563 while (1)
1565 aloop = loop_outer (nest);
1567 if (aloop == current_loops->tree_root
1568 || aloop->inner->next)
1569 break;
1571 nest = aloop;
1574 /* For each loop, determine the amount of data accessed in each iteration.
1575 We use this to estimate whether the reference is evicted from the
1576 cache before its reuse. */
1577 find_loop_nest (nest, &vloops);
1578 n = vloops.length ();
1579 loop_data_size = XNEWVEC (unsigned, n);
1580 volume = volume_of_references (refs);
1581 i = n;
1582 while (i-- != 0)
1584 loop_data_size[i] = volume;
1585 /* Bound the volume by the L2 cache size, since above this bound,
1586 all dependence distances are equivalent. */
1587 if (volume > L2_CACHE_SIZE_BYTES)
1588 continue;
1590 aloop = vloops[i];
1591 vol = estimated_stmt_executions_int (aloop);
1592 if (vol == -1)
1593 vol = expected_loop_iterations (aloop);
1594 volume *= vol;
1597 /* Prepare the references in the form suitable for data dependence
1598 analysis. We ignore unanalyzable data references (the results
1599     are used just as a heuristic to estimate the temporality of the
1600 references, hence we do not need to worry about correctness). */
1601 for (gr = refs; gr; gr = gr->next)
1602 for (ref = gr->refs; ref; ref = ref->next)
1604 dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1605 ref->mem, ref->stmt, !ref->write_p);
1607 if (dr)
1609 ref->reuse_distance = volume;
1610 dr->aux = ref;
1611 datarefs.safe_push (dr);
1613 else
1614 no_other_refs = false;
1617 FOR_EACH_VEC_ELT (datarefs, i, dr)
1619 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1620 ref = (struct mem_ref *) dr->aux;
1621 if (ref->reuse_distance > dist)
1622 ref->reuse_distance = dist;
1624 if (no_other_refs)
1625 ref->independent_p = true;
1628 if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1629 return false;
1631 FOR_EACH_VEC_ELT (dependences, i, dep)
1633 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1634 continue;
1636 ref = (struct mem_ref *) DDR_A (dep)->aux;
1637 refb = (struct mem_ref *) DDR_B (dep)->aux;
1639 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1640 || DDR_NUM_DIST_VECTS (dep) == 0)
1642 /* If the dependence cannot be analyzed, assume that there might be
1643 a reuse. */
1644 dist = 0;
1646 ref->independent_p = false;
1647 refb->independent_p = false;
1649 else
1651 /* The distance vectors are normalized to be always lexicographically
1652 positive, hence we cannot tell just from them whether DDR_A comes
1653 before DDR_B or vice versa. However, it is not important,
1654 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1655 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1656 in cache (and marking it as nontemporal would not affect
1657 anything). */
1659 dist = volume;
1660 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1662 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1663 loop_data_size, n);
1665 /* If this is a dependence in the innermost loop (i.e., the
1666 distances in all superloops are zero) and it is not
1667 the trivial self-dependence with distance zero, record that
1668 the references are not completely independent. */
1669 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1670 && (ref != refb
1671 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1673 ref->independent_p = false;
1674 refb->independent_p = false;
1677 /* Ignore accesses closer than
1678 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1679	     so that we use nontemporal prefetches e.g. if a single memory
1680 location is accessed several times in a single iteration of
1681 the loop. */
1682 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1683 continue;
1685 if (adist < dist)
1686 dist = adist;
1690 if (ref->reuse_distance > dist)
1691 ref->reuse_distance = dist;
1692 if (refb->reuse_distance > dist)
1693 refb->reuse_distance = dist;
1696 free_dependence_relations (dependences);
1697 free_data_refs (datarefs);
1698 free (loop_data_size);
1700 if (dump_file && (dump_flags & TDF_DETAILS))
1702 fprintf (dump_file, "Reuse distances:\n");
1703 for (gr = refs; gr; gr = gr->next)
1704 for (ref = gr->refs; ref; ref = ref->next)
1705 fprintf (dump_file, " ref %p distance %u\n",
1706 (void *) ref, ref->reuse_distance);
1709 return true;
1712 /* Determine whether or not the trip count to ahead ratio is too small based
1713    on profitability considerations.
1714 AHEAD: the iteration ahead distance,
1715 EST_NITER: the estimated trip count. */
1717 static bool
1718 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1720 /* Assume trip count to ahead ratio is big enough if the trip count could not
1721 be estimated at compile time. */
1722 if (est_niter < 0)
1723 return false;
1725 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1727 if (dump_file && (dump_flags & TDF_DETAILS))
1728 fprintf (dump_file,
1729 "Not prefetching -- loop estimated to roll only %d times\n",
1730 (int) est_niter);
1731 return true;
1734 return false;
1737 /* Determine whether or not the number of memory references in the loop is
1738    reasonable based on the profitability and compilation time considerations.
1739 NINSNS: estimated number of instructions in the loop,
1740 MEM_REF_COUNT: total number of memory references in the loop. */
1742 static bool
1743 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1745 int insn_to_mem_ratio;
1747 if (mem_ref_count == 0)
1748 return false;
1750 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1751     (compute_all_dependences) have high cost due to their quadratic complexity.
1752 To avoid huge compilation time, we give up prefetching if mem_ref_count
1753 is too large. */
1754 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1755 return false;
1757 /* Prefetching improves performance by overlapping cache missing
1758 memory accesses with CPU operations. If the loop does not have
1759 enough CPU operations to overlap with memory operations, prefetching
1760 won't give a significant benefit. One approximate way of checking
1761 this is to require the ratio of instructions to memory references to
1762 be above a certain limit. This approximation works well in practice.
1763 TODO: Implement a more precise computation by estimating the time
1764 for each CPU or memory op in the loop. Time estimates for memory ops
1765 should account for cache misses. */
1766 insn_to_mem_ratio = ninsns / mem_ref_count;
1768 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1770 if (dump_file && (dump_flags & TDF_DETAILS))
1771 fprintf (dump_file,
1772 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1773 insn_to_mem_ratio);
1774 return false;
1777 return true;
1780 /* Determine whether or not the instruction to prefetch ratio in the loop is
1781    too small based on profitability considerations.
1782 NINSNS: estimated number of instructions in the loop,
1783 PREFETCH_COUNT: an estimate of the number of prefetches,
1784 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1786 static bool
1787 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1788 unsigned unroll_factor)
1790 int insn_to_prefetch_ratio;
1792 /* Prefetching most likely causes performance degradation when the instruction
1793 to prefetch ratio is too small. Too many prefetch instructions in a loop
1794 may reduce the I-cache performance.
1795 (unroll_factor * ninsns) is used to estimate the number of instructions in
1796 the unrolled loop. This implementation is a bit simplistic -- the number
1797 of issued prefetch instructions is also affected by unrolling. So,
1798 prefetch_mod and the unroll factor should be taken into account when
1799 determining prefetch_count. Also, the number of insns of the unrolled
1800 loop will usually be significantly smaller than the number of insns of the
1801 original loop * unroll_factor (at least the induction variable increases
1802 and the exit branches will get eliminated), so it might be better to use
1803 tree_estimate_loop_size + estimated_unrolled_size. */
1804 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1805 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1807 if (dump_file && (dump_flags & TDF_DETAILS))
1808 fprintf (dump_file,
1809 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1810 insn_to_prefetch_ratio);
1811 return true;
1814 return false;
1818 /* Issue prefetch instructions for array references in LOOP. Returns
1819 true if the LOOP was unrolled. */
1821 static bool
1822 loop_prefetch_arrays (struct loop *loop)
1824 struct mem_ref_group *refs;
1825 unsigned ahead, ninsns, time, unroll_factor;
1826 HOST_WIDE_INT est_niter;
1827 struct tree_niter_desc desc;
1828 bool unrolled = false, no_other_refs;
1829 unsigned prefetch_count;
1830 unsigned mem_ref_count;
1832 if (optimize_loop_nest_for_size_p (loop))
1834 if (dump_file && (dump_flags & TDF_DETAILS))
1835 fprintf (dump_file, " ignored (cold area)\n");
1836 return false;
1839 /* FIXME: the time should be weighted by the probabilities of the blocks in
1840 the loop body. */
1841 time = tree_num_loop_insns (loop, &eni_time_weights);
1842 if (time == 0)
1843 return false;
1845 ahead = (PREFETCH_LATENCY + time - 1) / time;
1846 est_niter = estimated_stmt_executions_int (loop);
1847 if (est_niter == -1)
1848 est_niter = max_stmt_executions_int (loop);
1850 /* Prefetching is not likely to be profitable if the trip count to ahead
1851 ratio is too small. */
1852 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1853 return false;
1855 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1857 /* Step 1: gather the memory references. */
1858 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1860 /* Give up prefetching if the number of memory references in the
1861     loop is not reasonable based on profitability and compilation time
1862 considerations. */
1863 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1864 goto fail;
1866 /* Step 2: estimate the reuse effects. */
1867 prune_by_reuse (refs);
1869 if (nothing_to_prefetch_p (refs))
1870 goto fail;
1872 if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1873 goto fail;
1875 /* Step 3: determine unroll factor. */
1876 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1877 est_niter);
1879 /* Estimate prefetch count for the unrolled loop. */
1880 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1881 if (prefetch_count == 0)
1882 goto fail;
1884 if (dump_file && (dump_flags & TDF_DETAILS))
1885 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1886 HOST_WIDE_INT_PRINT_DEC "\n"
1887 "insn count %d, mem ref count %d, prefetch count %d\n",
1888 ahead, unroll_factor, est_niter,
1889 ninsns, mem_ref_count, prefetch_count);
1891 /* Prefetching is not likely to be profitable if the instruction to prefetch
1892 ratio is too small. */
1893 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1894 unroll_factor))
1895 goto fail;
1897 mark_nontemporal_stores (loop, refs);
1899 /* Step 4: what to prefetch? */
1900 if (!schedule_prefetches (refs, unroll_factor, ahead))
1901 goto fail;
1903 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1904 iterations so that we do not issue superfluous prefetches. */
1905 if (unroll_factor != 1)
1907 tree_unroll_loop (loop, unroll_factor,
1908 single_dom_exit (loop), &desc);
1909 unrolled = true;
1912 /* Step 6: issue the prefetches. */
1913 issue_prefetches (refs, unroll_factor, ahead);
1915 fail:
1916 release_mem_refs (refs);
1917 return unrolled;
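/* Worked example of the ahead computation (with assumed numbers): for
   PREFETCH_LATENCY = 200 and a loop body costing time = 10,
   ahead = (200 + 10 - 1) / 10 = 20 iterations; with
   TRIP_COUNT_TO_AHEAD_RATIO = 4, prefetching is attempted only if the
   loop is estimated to run at least 80 iterations.  */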
1920 /* Issue prefetch instructions for array references in loops. */
1922 unsigned int
1923 tree_ssa_prefetch_arrays (void)
1925 struct loop *loop;
1926 bool unrolled = false;
1927 int todo_flags = 0;
1929 if (!targetm.have_prefetch ()
1930      /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1931	 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1932 of processor costs and i486 does not have prefetch, but
1933 -march=pentium4 causes targetm.have_prefetch to be true. Ugh. */
1934 || PREFETCH_BLOCK == 0)
1935 return 0;
1937 if (dump_file && (dump_flags & TDF_DETAILS))
1939 fprintf (dump_file, "Prefetching parameters:\n");
1940 fprintf (dump_file, " simultaneous prefetches: %d\n",
1941 SIMULTANEOUS_PREFETCHES);
1942 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1943 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1944 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1945 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1946 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1947 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1948 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1949 MIN_INSN_TO_PREFETCH_RATIO);
1950 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1951 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1952 fprintf (dump_file, "\n");
1955 initialize_original_copy_tables ();
1957 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
1959 tree type = build_function_type_list (void_type_node,
1960 const_ptr_type_node, NULL_TREE);
1961 tree decl = add_builtin_function ("__builtin_prefetch", type,
1962 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1963 NULL, NULL_TREE);
1964 DECL_IS_NOVOPS (decl) = true;
1965 set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
1968 /* We assume that size of cache line is a power of two, so verify this
1969 here. */
1970 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1972 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1974 if (dump_file && (dump_flags & TDF_DETAILS))
1975 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1977 unrolled |= loop_prefetch_arrays (loop);
1979 if (dump_file && (dump_flags & TDF_DETAILS))
1980 fprintf (dump_file, "\n\n");
1983 if (unrolled)
1985 scev_reset ();
1986 todo_flags |= TODO_cleanup_cfg;
1989 free_original_copy_tables ();
1990 return todo_flags;
1993 /* Prefetching. */
1995 namespace {
1997 const pass_data pass_data_loop_prefetch =
1999 GIMPLE_PASS, /* type */
2000 "aprefetch", /* name */
2001 OPTGROUP_LOOP, /* optinfo_flags */
2002 TV_TREE_PREFETCH, /* tv_id */
2003 ( PROP_cfg | PROP_ssa ), /* properties_required */
2004 0, /* properties_provided */
2005 0, /* properties_destroyed */
2006 0, /* todo_flags_start */
2007 0, /* todo_flags_finish */
2010 class pass_loop_prefetch : public gimple_opt_pass
2012 public:
2013 pass_loop_prefetch (gcc::context *ctxt)
2014 : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
2017 /* opt_pass methods: */
2018 virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
2019 virtual unsigned int execute (function *);
2021 }; // class pass_loop_prefetch
2023 unsigned int
2024 pass_loop_prefetch::execute (function *fun)
2026 if (number_of_loops (fun) <= 1)
2027 return 0;
2029 return tree_ssa_prefetch_arrays ();
2032 } // anon namespace
2034 gimple_opt_pass *
2035 make_pass_loop_prefetch (gcc::context *ctxt)
2037 return new pass_loop_prefetch (ctxt);