/* Array prefetching.
   Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "varray.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "toplev.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"
#include "optabs.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
	{
	  a[255] = ...;		(0)
	  a[i] = ...;		(1)
	  a[i + 64] = ...;	(2)
	  a[16*i] = ...;	(3)
	  a[187*i] = ...;	(4)
	  a[187*i + 50] = ...;	(5)
	}

       (0) obviously has PREFETCH_BEFORE 1
       (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
	   location 64 iterations before it, and PREFETCH_MOD 64 (since
	   it hits the same cache line otherwise).
       (2) has PREFETCH_MOD 64
       (3) has PREFETCH_MOD 4
       (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
	   the cache line accessed by (4) is the same as the one accessed
	   by (5) with probability only 7/32 -- two accesses 50 bytes apart
	   share a 64-byte line only if the first one falls within the first
	   14 bytes of the line, i.e. with probability 14/64 = 7/32.
       (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We issue as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
	 in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by the user
	 (now we just ignore them; at the very least we should avoid
	 optimizing loops in which the user put his own prefetches)
      -- we assume cache line size alignment of arrays; this could be
	 improved.  */

/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if a write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if a read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16

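/* An illustrative computation (the cache sizes here are hypothetical, not
   the compiler defaults): for a 32 kB L1 cache and a 2 MB L2 cache, an
   access is a candidate for a nontemporal prefetch only if more than
   2 * 1024 * 1024 bytes of memory are touched before its next reuse,
   while reuses within 32 * 1024 / 16 = 2048 bytes of accessed data are
   considered too close to matter and are ignored.  */
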
/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;			/* Base of the reference.  */
  HOST_WIDE_INT step;		/* Step of the reference.  */
  struct mem_ref *refs;		/* References in the group.  */
  struct mem_ref_group *next;	/* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL		(~(unsigned HOST_WIDE_INT) 0)

/* The memory reference.  */

struct mem_ref
{
  gimple stmt;			/* Statement in which the reference appears.  */
  tree mem;			/* The reference.  */
  HOST_WIDE_INT delta;		/* Constant offset of the reference.  */
  struct mem_ref_group *group;	/* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
				/* Prefetch only each PREFETCH_MOD-th
				   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
				/* Prefetch only first PREFETCH_BEFORE
				   iterations.  */
  unsigned reuse_distance;	/* The amount of data accessed before the first
				   reuse of this value.  */
  struct mem_ref *next;		/* The next reference in the group.  */
  unsigned write_p : 1;		/* Is it a write?  */
  unsigned independent_p : 1;	/* True if the reference is independent of
				   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;	/* Should we really issue the prefetch?  */
  unsigned storent_p : 1;	/* True if we changed the store to a
				   nontemporal one.  */
};

/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, "  group %p (base ", (void *) ref->group);
  print_generic_expr (file, ref->group->base, TDF_SLIM);
  fprintf (file, ", step ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
  fprintf (file, ")\n");

  fprintf (file, "  delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
  fprintf (file, "\n");

  fprintf (file, "  %s\n", ref->write_p ? "write" : "read");

  fprintf (file, "\n");
}

/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base,
		      HOST_WIDE_INT step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if ((*groups)->step == step
	  && operand_equal_p ((*groups)->base, base, 0))
	return *groups;

      /* Keep the list of groups sorted by decreasing step.  */
      if ((*groups)->step < step)
	break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}

/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
	    HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* It need not be possible for a write reference to reuse the read
	 prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
	  && write_p
	  && !(*aref)->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !write_p
	  && (*aref)->write_p)
	continue;

      if ((*aref)->delta == delta)
	return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}

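/* Note the pointer-to-pointer idiom above: when the scan loop terminates
   without finding a duplicate, AREF points at the trailing "next" field of
   the last reference (or at GROUP->refs for an empty group), so assigning
   through it appends the new reference at the end of the list, keeping the
   references in the order in which they were recorded.  */
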
/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
	{
	  next_r = ref->next;
	  free (ref);
	}
      free (groups);
    }
}

/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;		/* Loop of the reference.  */
  gimple stmt;			/* Statement of the reference.  */
  HOST_WIDE_INT *step;		/* Step of the memory reference.  */
  HOST_WIDE_INT *delta;		/* Offset of the memory reference.  */
};

/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT istep, idelta = 0, imult = 1;
  affine_iv iv;

  if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
      || TREE_CODE (base) == ALIGN_INDIRECT_REF)
    return false;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
		  *index, &iv, false))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (!cst_and_fits_in_hwi (step))
    return false;
  istep = int_cst_value (step);

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
	return false;
      imult = int_cst_value (stepsize);

      istep *= imult;
      idelta *= imult;
    }

  *ar_data->step += istep;
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}

/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
	     HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
	     gimple stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = 0;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.  */
  if (TREE_CODE (ref) == COMPONENT_REF
      && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
    ref = TREE_OPERAND (ref, 0);

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}

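/* A small worked example (the types are hypothetical, for illustration
   only): for the reference a[i + 3] in a loop with induction variable i,
   where a is an array of 4-byte ints, the decomposition yields BASE = &a,
   STEP = 4 and DELTA = 12, i.e. the address accessed in iteration "iter"
   is &a + 4 * iter + 12.  */
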
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
			      tree ref, bool write_p, gimple stmt)
{
  tree base;
  HOST_WIDE_INT step, delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}

/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;

  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
	continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
	    {
	      if (gimple_vuse (stmt)
		  || (is_gimple_call (stmt)
		      && !(gimple_call_flags (stmt) & ECF_CONST)))
		*no_other_refs = false;
	      continue;
	    }

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);

	  if (REFERENCE_CLASS_P (rhs))
	    *no_other_refs &= gather_memory_references_ref (loop, &refs,
							    rhs, false, stmt);
	  if (REFERENCE_CLASS_P (lhs))
	    *no_other_refs &= gather_memory_references_ref (loop, &refs,
							    lhs, true, stmt);
	}
    }
  free (body);

  return refs;
}

/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step = ref->group->step;
  bool backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}

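/* E.g., for reference (3) from the comment at the top of the file --
   a[16*i] with a 16-byte step, 64-byte cache lines and no hardware
   sequential prefetch -- this gives prefetch_mod = 64 / 16 = 4: a single
   prefetch covers the cache line used by four consecutive iterations.  */
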
/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
}

/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
			  bool by_is_before)
{
  HOST_WIDE_INT step = ref->group->step;
  bool backward = step < 0;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
	 former.  */
      if (by_is_before)
	ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
	 same cache line, prefetch just the first one.  */
      if (!by_is_before)
	return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
	  != ddown (by->delta, PREFETCH_BLOCK))
	return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
	return;

      /* Transform the data so that we may assume that the accesses
	 are forward.  */
      delta = -delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
	return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which it occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case.  First let us ensure that size of cache line
     and step are coprime (here we assume that PREFETCH_BLOCK is a power
     of two).  */
  prefetch_block = PREFETCH_BLOCK;
  while ((step & 1) == 0
	 && prefetch_block > 1)
    {
      step >>= 1;
      prefetch_block >>= 1;
      delta >>= 1;
    }

  /* Now step > prefetch_block, and step and prefetch_block are coprime.
     Determine the probability that the accesses hit the same cache line.  */

  prefetch_before = delta / step;
  delta %= step;
  if ((unsigned HOST_WIDE_INT) delta
      <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
    {
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if ((unsigned HOST_WIDE_INT) delta
      <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
    {
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* The ref probably does not reuse by.  */
  return;
}

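/* Two worked instances from the example at the top of the file (64-byte
   cache lines): for REF a[i] and BY a[i + 64] (step 1, delta 64), the step
   is below PREFETCH_BLOCK, so hit_from = 64 and prefetch_before
   = (64 - 0 + 1 - 1) / 1 = 64, which is how (1) gets PREFETCH_BEFORE 64.
   For references (4) and (5) (step 187, delta 50), 187 is odd so the
   halving loop does not run, and neither 50 nor 187 - 50 = 137 is below
   64 * ACCEPTABLE_MISS_RATE / 1000 = 3, so no PREFETCH_BEFORE is recorded
   -- the probability of hitting the same line is too low.  */
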
/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
	{
	  before = false;
	  continue;
	}

      if (!WRITE_CAN_USE_READ_PREFETCH
	  && ref->write_p
	  && !prune_by->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !ref->write_p
	  && prune_by->write_p)
	continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}

/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Reference %p:", (void *) ref_pruned);

	  if (ref_pruned->prefetch_before == PREFETCH_ALL
	      && ref_pruned->prefetch_mod == 1)
	    fprintf (dump_file, " no restrictions");
	  else if (ref_pruned->prefetch_before == 0)
	    fprintf (dump_file, " do not prefetch");
	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
	    fprintf (dump_file, " prefetch once");
	  else
	    {
	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
		{
		  fprintf (dump_file, " prefetch before ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_before);
		}
	      if (ref_pruned->prefetch_mod != 1)
		{
		  fprintf (dump_file, " prefetch mod ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_mod);
		}
	    }
	  fprintf (dump_file, "\n");
	}
    }
}

/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}

/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* For now do not issue prefetches for only first few of the
     iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    return false;

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    return false;

  return true;
}

/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
		     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
	     slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
	if (!should_issue_prefetch_p (ref))
	  continue;

	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
	   and we unroll the loop UNROLL_FACTOR times, we need to insert
	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
	   iteration.  */
	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			/ ref->prefetch_mod);
	prefetch_slots = n_prefetches * slots_per_prefetch;

	/* If more than half of the prefetches would be lost anyway, do not
	   issue the prefetch.  */
	if (2 * remaining_prefetch_slots < prefetch_slots)
	  continue;

	ref->issue_prefetch_p = true;
	any = true;

	if (remaining_prefetch_slots <= prefetch_slots)
	  return true;
	remaining_prefetch_slots -= prefetch_slots;
      }

  return any;
}

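/* Illustrative arithmetic (the parameter values here are hypothetical):
   with AHEAD = 8 and UNROLL_FACTOR = 4, each prefetch instruction occupies
   (8 + 4 / 2) / 4 = 2 slots.  A reference with PREFETCH_MOD 1 then needs
   n_prefetches = 4 instructions per unrolled iteration, i.e. 8 slots; if
   only, say, 3 slots remain, 2 * 3 < 8 and the reference is skipped rather
   than prefetched mostly in vain.  */
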
/* Determine whether there is any reference suitable for prefetching
   in GROUPS.  */

static bool
anything_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	return true;

  return false;
}

/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local;
  gimple prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
	     nontemporal ? " nontemporal" : "",
	     (void *) ref);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
		  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
					true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);

  for (ap = 0; ap < n_prefetches; ap++)
    {
      /* Determine the address to prefetch.  */
      delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
      addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
			  addr_base, size_int (delta));
      addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
				       true, GSI_SAME_STMT);

      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
				    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}

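/* The effect, sketched at the source level (the names and numbers are
   illustrative): for a read of a[i] with a 4-byte step, AHEAD = 8 and
   PREFETCH_MOD >= UNROLL_FACTOR (so a single prefetch per iteration), the
   loop body gains a statement equivalent to

     __builtin_prefetch (&a[i] + 32, 0, 3);

   i.e. prefetch 8 iterations ahead, for reading, with full temporal
   locality; a nontemporal reference would get locality hint 0 instead.  */
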
/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
		  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
	issue_prefetch_ref (ref, unroll_factor, ahead);
}

/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  enum machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = optab_handler (storent_optab, mode)->insn_code;
  return code != CODE_FOR_nothing;
}

/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
	     (void *) ref);

  gimple_assign_set_nontemporal_move (ref->stmt, true);
  ref->storent_p = true;

  return true;
}

/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  edge exit;
  gimple call;
  gimple_stmt_iterator bsi;
  unsigned i;

  for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
    {
      call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);

      if (!single_pred_p (exit->dest)
	  /* If possible, we prefer not to insert the fence on other paths
	     in cfg.  */
	  && !(exit->flags & EDGE_ABNORMAL))
	split_loop_exit_edge (exit);
      bsi = gsi_after_labels (exit->dest);

      gsi_insert_before (&bsi, call, GSI_NEW_STMT);
      mark_virtual_ops_for_renaming (call);
    }

  VEC_free (edge, heap, exits);
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Returns true if we can use storent in loop, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue a mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      VEC (edge, heap) *exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
	if ((exit->flags & EDGE_ABNORMAL)
	    && exit->dest == EXIT_BLOCK_PTR)
	  ret = false;

      VEC_free (edge, heap, exits);
    }

  return ret;
}

/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}

/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
		      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}

/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
			 unsigned ninsns, struct tree_niter_desc *desc,
			 HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would be never entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  mod_constraint = ref->prefetch_mod;
	  nfactor = least_common_multiple (mod_constraint, factor);
	  if (nfactor <= upper_bound)
	    factor = nfactor;
	}

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}

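/* For instance (illustrative values): with references whose PREFETCH_MOD
   values are 4 and 16, the factor grows as lcm (1, 4) = 4, then
   lcm (4, 16) = 16, so each reference ends up prefetched exactly once per
   unrolled body -- provided 16 does not exceed UPPER_BOUND, in which case
   the factor stops at the last acceptable value.  */
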
/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */

static unsigned
volume_of_references (struct mem_ref_group *refs)
{
  unsigned volume = 0;
  struct mem_ref_group *gr;
  struct mem_ref *ref;

  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	/* Almost always reuses another value?  */
	if (ref->prefetch_before != PREFETCH_ALL)
	  continue;

	/* If several iterations access the same cache line, use the size of
	   the line divided by this number.  Otherwise, a cache line is
	   accessed in each iteration.  TODO -- in the latter case, we should
	   take the size of the reference into account, rounding it up to a
	   multiple of the cache line size.  */
	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
      }
  return volume;
}

/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */

static unsigned
volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
{
  unsigned i;

  for (i = 0; i < n; i++)
    if (vec[i] != 0)
      break;

  if (i == n)
    return 0;

  gcc_assert (vec[i] > 0);

  /* We ignore the parts of the distance vector in subloops, since usually
     the numbers of iterations are much smaller.  */
  return loop_sizes[i] * vec[i];
}

/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */

static void
add_subscript_strides (tree access_fn, unsigned stride,
		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
{
  struct loop *aloop;
  tree step;
  HOST_WIDE_INT astep;
  unsigned min_depth = loop_depth (loop) - n;

  while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
    {
      aloop = get_chrec_loop (access_fn);
      step = CHREC_RIGHT (access_fn);
      access_fn = CHREC_LEFT (access_fn);

      if ((unsigned) loop_depth (aloop) <= min_depth)
	continue;

      if (host_integerp (step, 0))
	astep = tree_low_cst (step, 0);
      else
	astep = L1_CACHE_LINE_SIZE;

      strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
    }
}

/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */

static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
		     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  VEC (tree, heap) *access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
	 use (a[j][i]);

     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
	 know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
	ref = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
	  if (host_integerp (stride, 1))
	    astride = tree_low_cst (stride, 1);
	  else
	    astride = L1_CACHE_LINE_SIZE;

	  ref = TREE_OPERAND (ref, 0);
	}
      else
	astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ? -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
	{
	  ret = loop_sizes[i];
	  break;
	}
    }

  free (strides);
  return ret;
}

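/* In the a[j][i] example above (with j the inner loop), the stride of the
   reference is N elements in the inner loop and one element in the outer
   one.  Assuming small elements and a large N, the inner stride exceeds
   the cache line size, so the same line is reused only when the outer loop
   advances, and the self-reuse distance becomes the volume of data
   accessed during a whole execution of the inner loop rather than during
   a single iteration.  */
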
/* Determines the distance till the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  */

static void
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
			   bool no_other_refs)
{
  struct loop *nest, *aloop;
  VEC (data_reference_p, heap) *datarefs = NULL;
  VEC (ddr_p, heap) *dependences = NULL;
  struct mem_ref_group *gr;
  struct mem_ref *ref, *refb;
  VEC (loop_p, heap) *vloops = NULL;
  unsigned *loop_data_size;
  unsigned i, j, n;
  unsigned volume, dist, adist;
  HOST_WIDE_INT vol;
  data_reference_p dr;
  ddr_p dep;

  if (loop->inner)
    return;

  /* Find the outermost loop of the loop nest of loop (we require that
     there are no sibling loops inside the nest).  */
  nest = loop;
  while (1)
    {
      aloop = loop_outer (nest);

      if (aloop == current_loops->tree_root
	  || aloop->inner->next)
	break;

      nest = aloop;
    }

  /* For each loop, determine the amount of data accessed in each iteration.
     We use this to estimate whether the reference is evicted from the
     cache before its reuse.  */
  find_loop_nest (nest, &vloops);
  n = VEC_length (loop_p, vloops);
  loop_data_size = XNEWVEC (unsigned, n);
  volume = volume_of_references (refs);
  i = n;
  while (i-- != 0)
    {
      loop_data_size[i] = volume;
      /* Bound the volume by the L2 cache size, since above this bound,
	 all dependence distances are equivalent.  */
      if (volume > L2_CACHE_SIZE_BYTES)
	continue;

      aloop = VEC_index (loop_p, vloops, i);
      vol = estimated_loop_iterations_int (aloop, false);
      if (vol < 0)
	vol = expected_loop_iterations (aloop);
      volume *= vol;
    }

  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);

	if (dr)
	  {
	    ref->reuse_distance = volume;
	    dr->aux = ref;
	    VEC_safe_push (data_reference_p, heap, datarefs, dr);
	  }
	else
	  no_other_refs = false;
      }

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = (struct mem_ref *) dr->aux;
      if (ref->reuse_distance > dist)
	ref->reuse_distance = dist;

      if (no_other_refs)
	ref->independent_p = true;
    }

  compute_all_dependences (datarefs, &dependences, vloops, true);

  for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
	continue;

      ref = (struct mem_ref *) DDR_A (dep)->aux;
      refb = (struct mem_ref *) DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
	  || DDR_NUM_DIST_VECTS (dep) == 0)
	{
	  /* If the dependence cannot be analyzed, assume that there might be
	     a reuse.  */
	  dist = 0;

	  ref->independent_p = false;
	  refb->independent_p = false;
	}
      else
	{
	  /* The distance vectors are normalized to be always lexicographically
	     positive, hence we cannot tell just from them whether DDR_A comes
	     before DDR_B or vice versa.  However, it is not important,
	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
	     DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
	     in cache (and marking it as nontemporal would not affect
	     anything).  */

	  dist = volume;
	  for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
	    {
	      adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
					     loop_data_size, n);

	      /* If this is a dependence in the innermost loop (i.e., the
		 distances in all superloops are zero) and it is not
		 the trivial self-dependence with distance zero, record that
		 the references are not completely independent.  */
	      if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
		  && (ref != refb
		      || DDR_DIST_VECT (dep, j)[n-1] != 0))
		{
		  ref->independent_p = false;
		  refb->independent_p = false;
		}

	      /* Ignore accesses closer than
		 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
		 so that we use nontemporal prefetches e.g. if a single memory
		 location is accessed several times in a single iteration of
		 the loop.  */
	      if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
		continue;

	      if (adist < dist)
		dist = adist;
	    }
	}

      if (ref->reuse_distance > dist)
	ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
	refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
	for (ref = gr->refs; ref; ref = ref->next)
	  fprintf (dump_file, " ref %p distance %u\n",
		   (void *) ref, ref->reuse_distance);
    }
}

/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  ignored (cold area)\n");
      return false;
    }

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs);

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (!anything_to_prefetch_p (refs))
    goto fail;

  determine_loop_nest_reuse (loop, refs, no_other_refs);

  /* Step 3: determine the ahead and unroll factor.  */

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  ahead = (PREFETCH_LATENCY + time - 1) / time;
  est_niter = estimated_loop_iterations_int (loop, false);

  /* The prefetches will run for AHEAD iterations of the original loop.  Unless
     the loop rolls at least AHEAD times, prefetching the references does not
     make sense.  */
  if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- loop estimated to roll only %d times\n",
		 (int) est_niter);
      goto fail;
    }

  mark_nontemporal_stores (loop, refs);

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
					   est_niter);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
			single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}

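/* Step 3 in numbers (the latency and loop size here are hypothetical): if
   PREFETCH_LATENCY is 200 and one loop iteration costs time = 30, then
   ahead = (200 + 30 - 1) / 30 = 7, so each prefetch targets the address
   used 7 iterations later, and loops estimated to roll 7 times or fewer
   are left alone.  */
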
/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  loop_iterator li;
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!HAVE_prefetch
      /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
	 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
	 of processor costs and i486 does not have prefetch, but
	 -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, "    simultaneous prefetches: %d\n",
	       SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!built_in_decls[BUILT_IN_PREFETCH])
    {
      tree type = build_function_type (void_type_node,
				       tree_cons (NULL_TREE,
						  const_ptr_type_node,
						  NULL_TREE));
      tree decl = add_builtin_function ("__builtin_prefetch", type,
					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
					NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      built_in_decls[BUILT_IN_PREFETCH] = decl;
    }

  /* We assume that size of cache line is a power of two, so verify this
     here.  */
  gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);

  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();

  return todo_flags;
}