/* Gimple ranger SSA cache implementation.
   Copyright (C) 2017-2022 Free Software Foundation, Inc.
   Contributed by Andrew MacLeod <amacleod@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "gimple-range.h"
#include "tree-cfg.h"
#include "target.h"
#include "attribs.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "cfganal.h"

#define DEBUG_RANGE_CACHE (dump_file \
                           && (param_ranger_debug & RANGER_DEBUG_CACHE))
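// Note: debug traces are only emitted when a dump file is active and the
// cache bit of the ranger-debug param is set (e.g. --param=ranger-debug=cache
// or =all).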

// This class represents the API into a cache of ranges for an SSA_NAME.
// Routines must be implemented to set, get, and query if a value is set.

class ssa_block_ranges
{
public:
  ssa_block_ranges (tree t) : m_type (t) { }
  virtual bool set_bb_range (const_basic_block bb, const vrange &r) = 0;
  virtual bool get_bb_range (vrange &r, const_basic_block bb) = 0;
  virtual bool bb_range_p (const_basic_block bb) = 0;

  void dump (FILE *f);
private:
  tree m_type;
};

// Print the list of known ranges for file F in a nice format.

void
ssa_block_ranges::dump (FILE *f)
{
  basic_block bb;
  Value_Range r (m_type);

  FOR_EACH_BB_FN (bb, cfun)
    if (get_bb_range (r, bb))
      {
        fprintf (f, "BB%d  -> ", bb->index);
        r.dump (f);
        fprintf (f, "\n");
      }
}

// This class implements the range cache as a linear vector, indexed by BB.
// It caches a varying and undefined range which are used instead of
// allocating new ones each time.

class sbr_vector : public ssa_block_ranges
{
public:
  sbr_vector (tree t, vrange_allocator *allocator);

  virtual bool set_bb_range (const_basic_block bb, const vrange &r) override;
  virtual bool get_bb_range (vrange &r, const_basic_block bb) override;
  virtual bool bb_range_p (const_basic_block bb) override;
protected:
  vrange **m_tab;	// Non growing vector.
  int m_tab_size;
  vrange *m_varying;
  vrange *m_undefined;
  tree m_type;
  vrange_allocator *m_range_allocator;
  void grow ();
};

// Initialize a block cache for an ssa_name of type T.

sbr_vector::sbr_vector (tree t, vrange_allocator *allocator)
  : ssa_block_ranges (t)
{
  gcc_checking_assert (TYPE_P (t));
  m_type = t;
  m_range_allocator = allocator;
  m_tab_size = last_basic_block_for_fn (cfun) + 1;
  m_tab = static_cast <vrange **>
    (allocator->alloc (m_tab_size * sizeof (vrange *)));
  memset (m_tab, 0, m_tab_size * sizeof (vrange *));

  // Create the cached type range.
  m_varying = m_range_allocator->alloc_vrange (t);
  m_undefined = m_range_allocator->alloc_vrange (t);
  m_varying->set_varying (t);
  m_undefined->set_undefined ();
}

// Grow the vector when the CFG has increased in size.

void
sbr_vector::grow ()
{
  int curr_bb_size = last_basic_block_for_fn (cfun);
  gcc_checking_assert (curr_bb_size > m_tab_size);

  // Grow by the maximum of: a) 128, b) twice the needed increase,
  // or c) 10% of the current size.
  int inc = MAX ((curr_bb_size - m_tab_size) * 2, 128);
  inc = MAX (inc, curr_bb_size / 10);
  int new_size = inc + curr_bb_size;

  // Allocate new memory, copy the old vector and clear the new space.
  vrange **t = static_cast <vrange **>
    (m_range_allocator->alloc (new_size * sizeof (vrange *)));
  memcpy (t, m_tab, m_tab_size * sizeof (vrange *));
  memset (t + m_tab_size, 0, (new_size - m_tab_size) * sizeof (vrange *));

  m_tab = t;
  m_tab_size = new_size;
}

// Set the range for block BB to be R.

bool
sbr_vector::set_bb_range (const_basic_block bb, const vrange &r)
{
  vrange *m;
  if (bb->index >= m_tab_size)
    grow ();
  if (r.varying_p ())
    m = m_varying;
  else if (r.undefined_p ())
    m = m_undefined;
  else
    m = m_range_allocator->clone (r);
  m_tab[bb->index] = m;
  return true;
}

// Return the range associated with block BB in R.  Return false if
// there is no range.

bool
sbr_vector::get_bb_range (vrange &r, const_basic_block bb)
{
  if (bb->index >= m_tab_size)
    return false;
  vrange *m = m_tab[bb->index];
  if (m)
    {
      r = *m;
      return true;
    }
  return false;
}

// Return true if a range is present.

bool
sbr_vector::bb_range_p (const_basic_block bb)
{
  if (bb->index < m_tab_size)
    return m_tab[bb->index] != NULL;
  return false;
}

// This class implements the on entry cache via a sparse bitmap.
// It uses the quad bit routines to access 4 bits at a time.
// A value of 0 (the default) means there is no entry, and a value of
// 1 thru SBR_NUM represents an element in the m_range vector.
// Varying is given the first value (1) and pre-cached.
// SBR_NUM + 1 represents the value of UNDEFINED, and is never stored.
// SBR_NUM is the number of values that can be cached.
// Indexes are 1..SBR_NUM and are stored locally at m_range[0..SBR_NUM-1].

#define SBR_NUM		14
#define SBR_UNDEF	(SBR_NUM + 1)
#define SBR_VARYING	1
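
// For example, with SBR_NUM == 14, each block's 4-bit entry decodes as:
//   0               -> no range is cached for this block.
//   1 (SBR_VARYING) -> the pre-cached varying range in m_range[0].
//   2 .. 14         -> the ranges stored in m_range[1] .. m_range[13].
//   15 (SBR_UNDEF)  -> undefined; no m_range slot is consumed.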

class sbr_sparse_bitmap : public ssa_block_ranges
{
public:
  sbr_sparse_bitmap (tree t, vrange_allocator *allocator, bitmap_obstack *bm);
  virtual bool set_bb_range (const_basic_block bb, const vrange &r) override;
  virtual bool get_bb_range (vrange &r, const_basic_block bb) override;
  virtual bool bb_range_p (const_basic_block bb) override;
private:
  void bitmap_set_quad (bitmap head, int quad, int quad_value);
  int bitmap_get_quad (const_bitmap head, int quad);
  vrange_allocator *m_range_allocator;
  vrange *m_range[SBR_NUM];
  bitmap_head bitvec;
  tree m_type;
};

// Initialize a block cache for an ssa_name of type T.

sbr_sparse_bitmap::sbr_sparse_bitmap (tree t, vrange_allocator *allocator,
				      bitmap_obstack *bm)
  : ssa_block_ranges (t)
{
  gcc_checking_assert (TYPE_P (t));
  m_type = t;
  bitmap_initialize (&bitvec, bm);
  bitmap_tree_view (&bitvec);
  m_range_allocator = allocator;
  // Pre-cache varying.
  m_range[0] = m_range_allocator->alloc_vrange (t);
  m_range[0]->set_varying (t);
  // Pre-cache non-zero and zero values for pointers.
  if (POINTER_TYPE_P (t))
    {
      m_range[1] = m_range_allocator->alloc_vrange (t);
      m_range[1]->set_nonzero (t);
      m_range[2] = m_range_allocator->alloc_vrange (t);
      m_range[2]->set_zero (t);
    }
  else
    m_range[1] = m_range[2] = NULL;
  // Clear the remaining entries.
  for (int x = 3; x < SBR_NUM; x++)
    m_range[x] = 0;
}

// Set 4 bit values in a sparse bitmap.  This allows a bitmap to
// function as a sparse array of 4 bit values.
// QUAD is the index, QUAD_VALUE is the 4 bit value to set.

inline void
sbr_sparse_bitmap::bitmap_set_quad (bitmap head, int quad, int quad_value)
{
  bitmap_set_aligned_chunk (head, quad, 4, (BITMAP_WORD) quad_value);
}

// Get a 4 bit value from a sparse bitmap.  This allows a bitmap to
// function as a sparse array of 4 bit values.
// QUAD is the index.

inline int
sbr_sparse_bitmap::bitmap_get_quad (const_bitmap head, int quad)
{
  return (int) bitmap_get_aligned_chunk (head, quad, 4);
}
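
// Since the chunks are aligned, the quad for basic block N occupies bits
// [4*N, 4*N+3] of the bitmap.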

// Set the range on entry to basic block BB to R.

bool
sbr_sparse_bitmap::set_bb_range (const_basic_block bb, const vrange &r)
{
  if (r.undefined_p ())
    {
      bitmap_set_quad (&bitvec, bb->index, SBR_UNDEF);
      return true;
    }

  // Loop thru the values to see if R is already present.
  for (int x = 0; x < SBR_NUM; x++)
    if (!m_range[x] || r == *(m_range[x]))
      {
	if (!m_range[x])
	  m_range[x] = m_range_allocator->clone (r);
	bitmap_set_quad (&bitvec, bb->index, x + 1);
	return true;
      }

  // All values are taken, default to VARYING.
  bitmap_set_quad (&bitvec, bb->index, SBR_VARYING);
  return false;
}
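
// Note the FALSE return above when all SBR_NUM slots are in use; the caller
// (see propagate_cache) treats a failed store as a propagation failure for
// that block.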

// Return the range associated with block BB in R.  Return false if
// there is no range.

bool
sbr_sparse_bitmap::get_bb_range (vrange &r, const_basic_block bb)
{
  int value = bitmap_get_quad (&bitvec, bb->index);

  if (!value)
    return false;

  gcc_checking_assert (value <= SBR_UNDEF);
  if (value == SBR_UNDEF)
    r.set_undefined ();
  else
    r = *(m_range[value - 1]);
  return true;
}

// Return true if a range is present.

bool
sbr_sparse_bitmap::bb_range_p (const_basic_block bb)
{
  return (bitmap_get_quad (&bitvec, bb->index) != 0);
}

// -------------------------------------------------------------------------

// Initialize the block cache.

block_range_cache::block_range_cache ()
{
  bitmap_obstack_initialize (&m_bitmaps);
  m_ssa_ranges.create (0);
  m_ssa_ranges.safe_grow_cleared (num_ssa_names);
  m_range_allocator = new vrange_allocator;
}

// Remove any m_block_caches which have been created.

block_range_cache::~block_range_cache ()
{
  delete m_range_allocator;
  // Release the vector itself.
  m_ssa_ranges.release ();
  bitmap_obstack_release (&m_bitmaps);
}

// Set the range for NAME on entry to block BB to R.
// If it has not been accessed yet, allocate it first.

bool
block_range_cache::set_bb_range (tree name, const_basic_block bb,
				 const vrange &r)
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_ssa_ranges.length ())
    m_ssa_ranges.safe_grow_cleared (num_ssa_names + 1);

  if (!m_ssa_ranges[v])
    {
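      // The sparse representation bounds memory use for large CFGs at the
      // cost of caching at most SBR_NUM distinct ranges per name; the
      // cutover point is controlled by the evrp-sparse-threshold param.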
      // Use sparse representation if there are too many basic blocks.
      if (last_basic_block_for_fn (cfun) > param_evrp_sparse_threshold)
	{
	  void *mem = m_range_allocator->alloc (sizeof (sbr_sparse_bitmap));
	  m_ssa_ranges[v] = new (mem) sbr_sparse_bitmap (TREE_TYPE (name),
							 m_range_allocator,
							 &m_bitmaps);
	}
      else
	{
	  // Otherwise use the default vector implementation.
	  void *mem = m_range_allocator->alloc (sizeof (sbr_vector));
	  m_ssa_ranges[v] = new (mem) sbr_vector (TREE_TYPE (name),
						  m_range_allocator);
	}
    }
  return m_ssa_ranges[v]->set_bb_range (bb, r);
}

// Return a pointer to the ssa_block_cache for NAME.  If it has not been
// accessed yet, return NULL.

inline ssa_block_ranges *
block_range_cache::query_block_ranges (tree name)
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_ssa_ranges.length () || !m_ssa_ranges[v])
    return NULL;
  return m_ssa_ranges[v];
}

// Return the range for NAME on entry to BB in R.  Return true if there
// is one.

bool
block_range_cache::get_bb_range (vrange &r, tree name, const_basic_block bb)
{
  ssa_block_ranges *ptr = query_block_ranges (name);
  if (ptr)
    return ptr->get_bb_range (r, bb);
  return false;
}

// Return true if NAME has a range set in block BB.

bool
block_range_cache::bb_range_p (tree name, const_basic_block bb)
{
  ssa_block_ranges *ptr = query_block_ranges (name);
  if (ptr)
    return ptr->bb_range_p (bb);
  return false;
}

// Print all known block caches to file F.

void
block_range_cache::dump (FILE *f)
{
  unsigned x;
  for (x = 0; x < m_ssa_ranges.length (); ++x)
    {
      if (m_ssa_ranges[x])
	{
	  fprintf (f, " Ranges for ");
	  print_generic_expr (f, ssa_name (x), TDF_NONE);
	  fprintf (f, ":\n");
	  m_ssa_ranges[x]->dump (f);
	  fprintf (f, "\n");
	}
    }
}

// Print all known ranges on entry to block BB to file F.

void
block_range_cache::dump (FILE *f, basic_block bb, bool print_varying)
{
  unsigned x;
  bool summarize_varying = false;
  for (x = 1; x < m_ssa_ranges.length (); ++x)
    {
      if (!gimple_range_ssa_p (ssa_name (x)))
	continue;

      Value_Range r (TREE_TYPE (ssa_name (x)));
      if (m_ssa_ranges[x] && m_ssa_ranges[x]->get_bb_range (r, bb))
	{
	  if (!print_varying && r.varying_p ())
	    {
	      summarize_varying = true;
	      continue;
	    }
	  print_generic_expr (f, ssa_name (x), TDF_NONE);
	  fprintf (f, "\t");
	  r.dump (f);
	  fprintf (f, "\n");
	}
    }
  // If there were any varying entries, lump them all together.
  if (summarize_varying)
    {
      fprintf (f, "VARYING_P on entry : ");
      for (x = 1; x < num_ssa_names; ++x)
	{
	  if (!gimple_range_ssa_p (ssa_name (x)))
	    continue;

	  Value_Range r (TREE_TYPE (ssa_name (x)));
	  if (m_ssa_ranges[x] && m_ssa_ranges[x]->get_bb_range (r, bb))
	    {
	      if (r.varying_p ())
		{
		  print_generic_expr (f, ssa_name (x), TDF_NONE);
		  fprintf (f, "  ");
		}
	    }
	}
      fprintf (f, "\n");
    }
}

// -------------------------------------------------------------------------

// Initialize a global cache.

ssa_global_cache::ssa_global_cache ()
{
  m_tab.create (0);
  m_range_allocator = new vrange_allocator;
}

// Deconstruct a global cache.

ssa_global_cache::~ssa_global_cache ()
{
  m_tab.release ();
  delete m_range_allocator;
}

// Retrieve the global range of NAME from cache memory if it exists.
// Return the value in R.

bool
ssa_global_cache::get_global_range (vrange &r, tree name) const
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_tab.length ())
    return false;

  vrange *stow = m_tab[v];
  if (!stow)
    return false;
  r = *stow;
  return true;
}

// Set the range for NAME to R in the global cache.
// Return TRUE if there was already a range set, otherwise false.

bool
ssa_global_cache::set_global_range (tree name, const vrange &r)
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_tab.length ())
    m_tab.safe_grow_cleared (num_ssa_names + 1);

  vrange *m = m_tab[v];
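  // If R fits in the existing allocation, reuse it; otherwise clone R
  // into freshly allocated storage.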
  if (m && m->fits_p (r))
    *m = r;
  else
    m_tab[v] = m_range_allocator->clone (r);
  return m != NULL;
}

// Clear the range for NAME from the global cache.

void
ssa_global_cache::clear_global_range (tree name)
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_tab.length ())
    m_tab.safe_grow_cleared (num_ssa_names + 1);
  m_tab[v] = NULL;
}

// Clear the global cache.

void
ssa_global_cache::clear ()
{
  if (m_tab.address ())
    memset (m_tab.address (), 0, m_tab.length () * sizeof (vrange *));
}

// Dump the contents of the global cache to F.

void
ssa_global_cache::dump (FILE *f)
{
  /* Cleared after the table header has been printed.  */
  bool print_header = true;
  for (unsigned x = 1; x < num_ssa_names; x++)
    {
      if (!gimple_range_ssa_p (ssa_name (x)))
	continue;
      Value_Range r (TREE_TYPE (ssa_name (x)));
      if (get_global_range (r, ssa_name (x)) && !r.varying_p ())
	{
	  if (print_header)
	    {
	      /* Print the header only when there's something else
		 to print below.  */
	      fprintf (f, "Non-varying global ranges:\n");
	      fprintf (f, "=========================\n");
	      print_header = false;
	    }

	  print_generic_expr (f, ssa_name (x), TDF_NONE);
	  fprintf (f, "  : ");
	  r.dump (f);
	  fprintf (f, "\n");
	}
    }

  if (!print_header)
    fputc ('\n', f);
}

// --------------------------------------------------------------------------


// This class will manage the timestamps for each ssa_name.
// When a value is calculated, the timestamp is set to the current time.
// Current time is then incremented.  Any dependencies will already have
// been calculated, and will thus have older timestamps.
// If one of those values is ever calculated again, it will get a newer
// timestamp, and the "current_p" check will fail.
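//
// For example, if c_3 is calculated from a_1 and b_2, c_3 receives a newer
// timestamp than either dependency.  If a_1 is later recalculated, its
// timestamp becomes newer than c_3's, and current_p (c_3, a_1, b_2) will
// return false, indicating c_3's cached value may be stale.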

class temporal_cache
{
public:
  temporal_cache ();
  ~temporal_cache ();
  bool current_p (tree name, tree dep1, tree dep2) const;
  void set_timestamp (tree name);
  void set_always_current (tree name);
private:
  unsigned temporal_value (unsigned ssa) const;

  unsigned m_current_time;
  vec <unsigned> m_timestamp;
};

inline
temporal_cache::temporal_cache ()
{
  m_current_time = 1;
  m_timestamp.create (0);
  m_timestamp.safe_grow_cleared (num_ssa_names);
}

inline
temporal_cache::~temporal_cache ()
{
  m_timestamp.release ();
}

// Return the timestamp value for SSA, or 0 if there isn't one.

inline unsigned
temporal_cache::temporal_value (unsigned ssa) const
{
  if (ssa >= m_timestamp.length ())
    return 0;
  return m_timestamp[ssa];
}

// Return TRUE if the timestamp for NAME is newer than both of its
// dependencies.  Up to 2 dependencies can be checked.

bool
temporal_cache::current_p (tree name, tree dep1, tree dep2) const
{
  unsigned ts = temporal_value (SSA_NAME_VERSION (name));
  if (ts == 0)
    return true;

  // Any non-registered dependencies will have a value of 0 and thus be older.
  // Return true if time is newer than both dependencies.

  if (dep1 && ts < temporal_value (SSA_NAME_VERSION (dep1)))
    return false;
  if (dep2 && ts < temporal_value (SSA_NAME_VERSION (dep2)))
    return false;

  return true;
}

// This increments the global timer and sets the timestamp for NAME.

inline void
temporal_cache::set_timestamp (tree name)
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_timestamp.length ())
    m_timestamp.safe_grow_cleared (num_ssa_names + 20);
  m_timestamp[v] = ++m_current_time;
}

// Set the timestamp to 0, marking it as "always up to date".

inline void
temporal_cache::set_always_current (tree name)
{
  unsigned v = SSA_NAME_VERSION (name);
  if (v >= m_timestamp.length ())
    m_timestamp.safe_grow_cleared (num_ssa_names + 20);
  m_timestamp[v] = 0;
}
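
// A stamp of 0 is also what temporal_value returns for names that have never
// been registered, so both cases read as "always current" in current_p.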

// --------------------------------------------------------------------------

// This class provides an abstraction of a list of blocks to be updated
// by the cache.  It is currently a stack but could be changed.  It also
// maintains a list of blocks which have failed propagation, and does not
// enter any of those blocks into the list.

// A vector over the BBs is maintained, and an entry of 0 means it is not in
// a list.  Otherwise, the entry is the next block in the list.  -1 terminates
// the list.  m_update_head points to the top of the list, -1 if the list
// is empty.
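//
// For example, adding blocks 5 and then 9 to an empty list yields
// m_update_head == 9, m_update_list[9] == 5 and m_update_list[5] == -1,
// while every other entry remains 0 (not in the list).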

class update_list
{
public:
  update_list ();
  ~update_list ();
  void add (basic_block bb);
  basic_block pop ();
  inline bool empty_p () { return m_update_head == -1; }
  inline void clear_failures () { bitmap_clear (m_propfail); }
  inline void propagation_failed (basic_block bb)
    { bitmap_set_bit (m_propfail, bb->index); }
private:
  vec<int> m_update_list;
  int m_update_head;
  bitmap m_propfail;
};

// Create an update list.

update_list::update_list ()
{
  m_update_list.create (0);
  m_update_list.safe_grow_cleared (last_basic_block_for_fn (cfun) + 64);
  m_update_head = -1;
  m_propfail = BITMAP_ALLOC (NULL);
}

// Destroy an update list.

update_list::~update_list ()
{
  m_update_list.release ();
  BITMAP_FREE (m_propfail);
}

// Add BB to the list of blocks to update, unless it's already in the list.

void
update_list::add (basic_block bb)
{
  int i = bb->index;
  if ((unsigned)i >= m_update_list.length ())
    m_update_list.safe_grow_cleared (i + 64);
  // If propagation has failed for BB, or it's already in the list, don't
  // add it again.
  if (!m_update_list[i] && !bitmap_bit_p (m_propfail, i))
    {
      if (empty_p ())
	{
	  m_update_head = i;
	  m_update_list[i] = -1;
	}
      else
	{
	  gcc_checking_assert (m_update_head > 0);
	  m_update_list[i] = m_update_head;
	  m_update_head = i;
	}
    }
}

// Remove a block from the list.

basic_block
update_list::pop ()
{
  gcc_checking_assert (!empty_p ());
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, m_update_head);
  int pop = m_update_head;
  m_update_head = m_update_list[pop];
  m_update_list[pop] = 0;
  return bb;
}

// --------------------------------------------------------------------------

ranger_cache::ranger_cache (int not_executable_flag, bool use_imm_uses)
  : m_gori (not_executable_flag),
    m_exit (use_imm_uses)
{
  m_workback.create (0);
  m_workback.safe_grow_cleared (last_basic_block_for_fn (cfun));
  m_workback.truncate (0);
  m_temporal = new temporal_cache;
  // If DOM info is available, spawn an oracle as well.
  if (dom_info_available_p (CDI_DOMINATORS))
    m_oracle = new dom_oracle ();
  else
    m_oracle = NULL;

  unsigned x, lim = last_basic_block_for_fn (cfun);
  // Calculate outgoing range info upfront.  This will fully populate the
  // m_maybe_variant bitmap which will help eliminate processing of names
  // which never have their ranges adjusted.
  for (x = 0; x < lim; x++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, x);
      if (bb)
	m_gori.exports (bb);
    }
  m_update = new update_list ();
}

ranger_cache::~ranger_cache ()
{
  delete m_update;
  if (m_oracle)
    delete m_oracle;
  delete m_temporal;
  m_workback.release ();
}

// Dump the global caches to file F.

void
ranger_cache::dump (FILE *f)
{
  m_globals.dump (f);
  fprintf (f, "\n");
}

// Dump the caches for basic block BB to file F.

void
ranger_cache::dump_bb (FILE *f, basic_block bb)
{
  m_gori.gori_map::dump (f, bb, false);
  m_on_entry.dump (f, bb);
  if (m_oracle)
    m_oracle->dump (f, bb);
}

// Get the global range for NAME, and return in R.  Return false if the
// global range is not set, and return the legacy global value in R.

bool
ranger_cache::get_global_range (vrange &r, tree name) const
{
  if (m_globals.get_global_range (r, name))
    return true;
  gimple_range_global (r, name);
  return false;
}

// Get the global range for NAME, and return in R.  Return false if the
// global range is not set, and R will contain the legacy global value.
// CURRENT_P is set to true if the value was in cache and not stale.
// Otherwise, set CURRENT_P to false and mark the value as always current.
// If the global cache did not have a value, initialize it as well.
// After this call, the global cache will have a value.

bool
ranger_cache::get_global_range (vrange &r, tree name, bool &current_p)
{
  bool had_global = get_global_range (r, name);

  // If there was a global value, set current flag, otherwise set a value.
  current_p = false;
  if (had_global)
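    // Singleton ranges can never be refined further, so they are always
    // current regardless of their timestamp.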
    current_p = r.singleton_p ()
		|| m_temporal->current_p (name, m_gori.depend1 (name),
					  m_gori.depend2 (name));
  else
    m_globals.set_global_range (name, r);

  // If the existing value was not current, mark it as always current.
  if (!current_p)
    m_temporal->set_always_current (name);
  return had_global;
}

// Set the global range of NAME to R and give it a timestamp.

void
ranger_cache::set_global_range (tree name, const vrange &r)
{
  if (m_globals.set_global_range (name, r))
    {
      // If there was already a range set, propagate the new value.
      basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (name));
      if (!bb)
	bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

      if (DEBUG_RANGE_CACHE)
	fprintf (dump_file, "   GLOBAL :");

      propagate_updated_value (name, bb);
    }
  // Constants no longer need to be tracked.  Any further refinement has to
  // be undefined.  Propagation works better with constants.  PR 100512.
  // Pointers which resolve to non-zero also do not need
  // tracking in the cache as they will never change.  See PR 98866.
  // Timestamp must always be updated, or dependent calculations may
  // not include this latest value.  PR 100774.

  if (r.singleton_p ()
      || (POINTER_TYPE_P (TREE_TYPE (name)) && r.nonzero_p ()))
    m_gori.set_range_invariant (name);
  m_temporal->set_timestamp (name);
}

// Provide lookup for the gori-computes class to access the best known range
// of an ssa_name in any given basic block.  Note, this does no additional
// lookups, just accesses the data that is already known.

// Get the range of NAME when the def occurs in block BB.  If BB is NULL
// get the best global value available.

void
ranger_cache::range_of_def (vrange &r, tree name, basic_block bb)
{
  gcc_checking_assert (gimple_range_ssa_p (name));
  gcc_checking_assert (!bb || bb == gimple_bb (SSA_NAME_DEF_STMT (name)));

  // Pick up the best global range available.
  if (!m_globals.get_global_range (r, name))
    {
      // If that fails, try to calculate the range using just global values.
      gimple *s = SSA_NAME_DEF_STMT (name);
      if (gimple_get_lhs (s) == name)
	fold_range (r, s, get_global_range_query ());
      else
	gimple_range_global (r, name);
    }
}

// Get the range of NAME as it occurs on entry to block BB.  Use MODE for
// lookups.

void
ranger_cache::entry_range (vrange &r, tree name, basic_block bb,
			   enum rfd_mode mode)
{
  if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    {
      gimple_range_global (r, name);
      return;
    }

  // Look for the on-entry value of name in BB from the cache.
  // Otherwise pick up the best available global value.
  if (!m_on_entry.get_bb_range (r, name, bb))
    if (!range_from_dom (r, name, bb, mode))
      range_of_def (r, name);
}

// Get the range of NAME as it occurs on exit from block BB.  Use MODE for
// lookups.

void
ranger_cache::exit_range (vrange &r, tree name, basic_block bb,
			  enum rfd_mode mode)
{
  if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    {
      gimple_range_global (r, name);
      return;
    }

  gimple *s = SSA_NAME_DEF_STMT (name);
  basic_block def_bb = gimple_bb (s);
  if (def_bb == bb)
    range_of_def (r, name, bb);
  else
    entry_range (r, name, bb, mode);
}

// Get the range of NAME on edge E using MODE, return the result in R.
// Always returns a range and true.

bool
ranger_cache::edge_range (vrange &r, edge e, tree name, enum rfd_mode mode)
{
  exit_range (r, name, e->src, mode);
  // If this is not an abnormal edge, check for inferred ranges on exit.
  if ((e->flags & (EDGE_EH | EDGE_ABNORMAL)) == 0)
    m_exit.maybe_adjust_range (r, name, e->src);
  int_range_max er;
  if (m_gori.outgoing_edge_range_p (er, e, name, *this))
    r.intersect (er);
  return true;
}


// Implement range_of_expr.

bool
ranger_cache::range_of_expr (vrange &r, tree name, gimple *stmt)
{
  if (!gimple_range_ssa_p (name))
    {
      get_tree_range (r, name, stmt);
      return true;
    }

  basic_block bb = gimple_bb (stmt);
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  basic_block def_bb = gimple_bb (def_stmt);

  if (bb == def_bb)
    range_of_def (r, name, bb);
  else
    entry_range (r, name, bb, RFD_NONE);
  return true;
}


// Implement range_on_edge.  Always return the best available range using
// the current cache values.

bool
ranger_cache::range_on_edge (vrange &r, edge e, tree expr)
{
  if (gimple_range_ssa_p (expr))
    return edge_range (r, e, expr, RFD_NONE);
  return get_tree_range (r, expr, NULL);
}

// Return a static range for NAME on entry to basic block BB in R.  If
// calc is true, fill any cache entries required between BB and the
// def block for NAME.  Otherwise, return false if the cache is empty.

bool
ranger_cache::block_range (vrange &r, basic_block bb, tree name, bool calc)
{
  gcc_checking_assert (gimple_range_ssa_p (name));

  // If there are no range calculations anywhere in the IL, global range
  // applies everywhere, so don't bother caching it.
  if (!m_gori.has_edge_range_p (name))
    return false;

  if (calc)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      basic_block def_bb = NULL;
      if (def_stmt)
	def_bb = gimple_bb (def_stmt);
      if (!def_bb)
	{
	  // If we get to the entry block, this better be a default def
	  // or range_on_entry was called for a block not dominated by
	  // the def.
	  gcc_checking_assert (SSA_NAME_IS_DEFAULT_DEF (name));
	  def_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
	}

      // There is no range on entry for the definition block.
      if (def_bb == bb)
	return false;

      // Otherwise, go figure out what is known in predecessor blocks.
      fill_block_cache (name, bb, def_bb);
      gcc_checking_assert (m_on_entry.bb_range_p (name, bb));
    }
  return m_on_entry.get_bb_range (r, name, bb);
}

// If there is anything in the propagation update_list, continue
// processing NAME until the list of blocks is empty.

void
ranger_cache::propagate_cache (tree name)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  tree type = TREE_TYPE (name);
  Value_Range new_range (type);
  Value_Range current_range (type);
  Value_Range e_range (type);

  // Process each block by seeing if its calculated range on entry is
  // the same as its cached value.  If there is a difference, update
  // the cache to reflect the new value, and check to see if any
  // successors have cache entries which may need to be checked for
  // updates.

  while (!m_update->empty_p ())
    {
      bb = m_update->pop ();
      gcc_checking_assert (m_on_entry.bb_range_p (name, bb));
      m_on_entry.get_bb_range (current_range, name, bb);

      if (DEBUG_RANGE_CACHE)
	{
	  fprintf (dump_file, "FWD visiting block %d for ", bb->index);
	  print_generic_expr (dump_file, name, TDF_SLIM);
	  fprintf (dump_file, "  starting range : ");
	  current_range.dump (dump_file);
	  fprintf (dump_file, "\n");
	}

      // Calculate the "new" range on entry by unioning the pred edges.
      new_range.set_undefined ();
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  range_on_edge (e_range, e, name);
	  if (DEBUG_RANGE_CACHE)
	    {
	      fprintf (dump_file, "   edge %d->%d :", e->src->index,
		       bb->index);
	      e_range.dump (dump_file);
	      fprintf (dump_file, "\n");
	    }
	  new_range.union_ (e_range);
	  if (new_range.varying_p ())
	    break;
	}

      // If the range on entry has changed, update it.
      if (new_range != current_range)
	{
	  bool ok_p = m_on_entry.set_bb_range (name, bb, new_range);
	  // If the cache couldn't set the value, mark it as failed.
	  if (!ok_p)
	    m_update->propagation_failed (bb);
	  if (DEBUG_RANGE_CACHE)
	    {
	      if (!ok_p)
		{
		  fprintf (dump_file, "   Cache failure to store value:");
		  print_generic_expr (dump_file, name, TDF_SLIM);
		  fprintf (dump_file, "  ");
		}
	      else
		{
		  fprintf (dump_file, "   Updating range to ");
		  new_range.dump (dump_file);
		}
	      fprintf (dump_file, "\n   Updating blocks :");
	    }
	  // Mark each successor that has a range to re-check its range.
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (m_on_entry.bb_range_p (name, e->dest))
	      {
		if (DEBUG_RANGE_CACHE)
		  fprintf (dump_file, " bb%d", e->dest->index);
		m_update->add (e->dest);
	      }
	  if (DEBUG_RANGE_CACHE)
	    fprintf (dump_file, "\n");
	}
    }
  if (DEBUG_RANGE_CACHE)
    {
      fprintf (dump_file, "DONE visiting blocks for ");
      print_generic_expr (dump_file, name, TDF_SLIM);
      fprintf (dump_file, "\n");
    }
  m_update->clear_failures ();
}

// Check to see if an update to the value for NAME in BB has any effect
// on values already in the on-entry cache for successor blocks.
// If it does, update them.  Don't visit any blocks which don't have a
// cache entry.

void
ranger_cache::propagate_updated_value (tree name, basic_block bb)
{
  edge e;
  edge_iterator ei;

  // The update work list should be empty at this point.
  gcc_checking_assert (m_update->empty_p ());
  gcc_checking_assert (bb);

  if (DEBUG_RANGE_CACHE)
    {
      fprintf (dump_file, " UPDATE cache for ");
      print_generic_expr (dump_file, name, TDF_SLIM);
      fprintf (dump_file, " in BB %d : successors : ", bb->index);
    }
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      // Only update active cache entries.
      if (m_on_entry.bb_range_p (name, e->dest))
	{
	  m_update->add (e->dest);
	  if (DEBUG_RANGE_CACHE)
	    fprintf (dump_file, " UPDATE: bb%d", e->dest->index);
	}
    }
  if (!m_update->empty_p ())
    {
      if (DEBUG_RANGE_CACHE)
	fprintf (dump_file, "\n");
      propagate_cache (name);
    }
  else
    {
      if (DEBUG_RANGE_CACHE)
	fprintf (dump_file, "  : No updates!\n");
    }
}

// Make sure that the range-on-entry cache for NAME is set for block BB.
// Work back through the CFG to DEF_BB ensuring the range is calculated
// on the block/edges leading back to that point.

void
ranger_cache::fill_block_cache (tree name, basic_block bb, basic_block def_bb)
{
  edge_iterator ei;
  edge e;
  Value_Range block_result (TREE_TYPE (name));
  Value_Range undefined (TREE_TYPE (name));

  // At this point we shouldn't be looking at the def, entry or exit block.
  gcc_checking_assert (bb != def_bb && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) &&
		       bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
  gcc_checking_assert (m_workback.length () == 0);

  // If the block cache is set, then we've already visited this block.
  if (m_on_entry.bb_range_p (name, bb))
    return;

  if (DEBUG_RANGE_CACHE)
    {
      fprintf (dump_file, "\n");
      print_generic_expr (dump_file, name, TDF_SLIM);
      fprintf (dump_file, " : ");
    }

  // Check if a dominator can supply the range.
  if (range_from_dom (block_result, name, bb, RFD_FILL))
    {
      m_on_entry.set_bb_range (name, bb, block_result);
      if (DEBUG_RANGE_CACHE)
	{
	  fprintf (dump_file, "Filled from dominator! :  ");
	  block_result.dump (dump_file);
	  fprintf (dump_file, "\n");
	}
      gcc_checking_assert (m_workback.length () == 0);
      return;
    }

  // Visit each block back to the DEF.  Initialize each one to UNDEFINED.
  // m_visited at the end will contain all the blocks that we needed to set
  // the range_on_entry cache for.
  m_workback.quick_push (bb);
  undefined.set_undefined ();
  m_on_entry.set_bb_range (name, bb, undefined);
  gcc_checking_assert (m_update->empty_p ());

  while (m_workback.length () > 0)
    {
      basic_block node = m_workback.pop ();
      if (DEBUG_RANGE_CACHE)
	{
	  fprintf (dump_file, "BACK visiting block %d for ", node->index);
	  print_generic_expr (dump_file, name, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}

      FOR_EACH_EDGE (e, ei, node->preds)
	{
	  basic_block pred = e->src;
	  Value_Range r (TREE_TYPE (name));

	  if (DEBUG_RANGE_CACHE)
	    fprintf (dump_file, "  %d->%d ", e->src->index, e->dest->index);

	  // If the pred block is the def block add this BB to update list.
	  if (pred == def_bb)
	    {
	      m_update->add (node);
	      continue;
	    }

	  // If the pred is entry but NOT def, then it is used before
	  // defined, it'll get set to [] and no need to update it.
	  if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    {
	      if (DEBUG_RANGE_CACHE)
		fprintf (dump_file, "entry: bail.");
	      continue;
	    }

	  // Regardless of whether we have visited pred or not, if the
	  // pred has inferred ranges, revisit this block.
	  // Don't search the DOM tree.
	  if (m_exit.has_range_p (name, pred))
	    {
	      if (DEBUG_RANGE_CACHE)
		fprintf (dump_file, "Inferred range: update ");
	      m_update->add (node);
	    }

	  // If the pred block already has a range, or if it can contribute
	  // something new.  Ie, the edge generates a range of some sort.
	  if (m_on_entry.get_bb_range (r, name, pred))
	    {
	      if (DEBUG_RANGE_CACHE)
		{
		  fprintf (dump_file, "has cache, ");
		  r.dump (dump_file);
		  fprintf (dump_file, ", ");
		}
	      if (!r.undefined_p () || m_gori.has_edge_range_p (name, e))
		{
		  m_update->add (node);
		  if (DEBUG_RANGE_CACHE)
		    fprintf (dump_file, "update. ");
		}
	      continue;
	    }

	  if (DEBUG_RANGE_CACHE)
	    fprintf (dump_file, "pushing undefined pred block.\n");
	  // If the pred hasn't been visited (has no range), add it to
	  // the list.
	  gcc_checking_assert (!m_on_entry.bb_range_p (name, pred));
	  m_on_entry.set_bb_range (name, pred, undefined);
	  m_workback.quick_push (pred);
	}
    }

  if (DEBUG_RANGE_CACHE)
    fprintf (dump_file, "\n");

  // Now fill in the marked blocks with values.
  propagate_cache (name);
  if (DEBUG_RANGE_CACHE)
    fprintf (dump_file, "  Propagation update done.\n");
}


// Get the range of NAME from dominators of BB and return it in R.  Search the
// dominator tree based on MODE.

bool
ranger_cache::range_from_dom (vrange &r, tree name, basic_block start_bb,
			      enum rfd_mode mode)
{
  if (mode == RFD_NONE || !dom_info_available_p (CDI_DOMINATORS))
    return false;

  // Search back to the definition block or entry block.
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (name));
  if (def_bb == NULL)
    def_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  basic_block bb;
  basic_block prev_bb = start_bb;

  // Track any inferred ranges seen.
  int_range_max infer (TREE_TYPE (name));

  // Range on entry to the DEF block should not be queried.
  gcc_checking_assert (start_bb != def_bb);
  unsigned start_limit = m_workback.length ();

  // Default value is global range.
  get_global_range (r, name);

  // Search until a value is found, pushing outgoing edges encountered.
  for (bb = get_immediate_dominator (CDI_DOMINATORS, start_bb);
       bb;
       prev_bb = bb, bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      // Accumulate any block exit inferred ranges.
      m_exit.maybe_adjust_range (infer, name, bb);

      // This block has an outgoing range.
      if (m_gori.has_edge_range_p (name, bb))
	{
	  // Only outgoing ranges to single_pred blocks are dominated by
	  // outgoing edge ranges, so those can be simply adjusted on the fly.
	  edge e = find_edge (bb, prev_bb);
	  if (e && single_pred_p (prev_bb))
	    m_workback.quick_push (prev_bb);
	  else if (mode == RFD_FILL)
	    {
	      // Multiple incoming edges, so recursively satisfy this block,
	      // store the range, then calculate the incoming range for
	      // PREV_BB.
	      if (def_bb != bb)
		{
		  range_from_dom (r, name, bb, RFD_FILL);
		  // If the range can't be stored, don't try to accumulate
		  // the range in PREV_BB due to excessive recalculations.
		  if (!m_on_entry.set_bb_range (name, bb, r))
		    break;
		}
	      // With the dominator set, we should be able to cheaply query
	      // each incoming edge now and accumulate the results.
	      r.set_undefined ();
	      edge_iterator ei;
	      Value_Range er (TREE_TYPE (name));
	      FOR_EACH_EDGE (e, ei, prev_bb->preds)
		{
		  edge_range (er, e, name, RFD_READ_ONLY);
		  r.union_ (er);
		}
	      // Set the cache in PREV_BB so it is not calculated again.
	      m_on_entry.set_bb_range (name, prev_bb, r);
	      break;
	    }
	}

      if (def_bb == bb)
	break;

      if (m_on_entry.get_bb_range (r, name, bb))
	break;
    }

  if (DEBUG_RANGE_CACHE)
    {
      fprintf (dump_file, "CACHE: BB %d DOM query, found ", start_bb->index);
      r.dump (dump_file);
      if (bb)
	fprintf (dump_file, " at BB%d\n", bb->index);
      else
	fprintf (dump_file, " at function top\n");
    }

  // Now process any outgoing edges that we've seen along the way.
  while (m_workback.length () > start_limit)
    {
      int_range_max er;
      prev_bb = m_workback.pop ();
      edge e = single_pred_edge (prev_bb);
      bb = e->src;

      if (m_gori.outgoing_edge_range_p (er, e, name, *this))
	{
	  r.intersect (er);
	  // If this is a normal edge, apply any inferred ranges.
	  if ((e->flags & (EDGE_EH | EDGE_ABNORMAL)) == 0)
	    m_exit.maybe_adjust_range (r, name, bb);

	  if (DEBUG_RANGE_CACHE)
	    {
	      fprintf (dump_file, "CACHE: Adjusted edge range for %d->%d : ",
		       bb->index, prev_bb->index);
	      r.dump (dump_file);
	      fprintf (dump_file, "\n");
	    }
	}
    }

  // Apply non-null if appropriate.
  if (!has_abnormal_call_or_eh_pred_edge_p (start_bb))
    r.intersect (infer);

  if (DEBUG_RANGE_CACHE)
    {
      fprintf (dump_file, "CACHE: Range for DOM returns : ");
      r.dump (dump_file);
      fprintf (dump_file, "\n");
    }
  return true;
}

// This routine is used during a block walk to apply any inferred ranges
// generated by stmt S, recording them in the inferred range table and
// updating the on-entry cache where appropriate.

void
ranger_cache::apply_inferred_ranges (gimple *s)
{
  int_range_max r;
  bool update = true;

  basic_block bb = gimple_bb (s);
  gimple_infer_range infer (s);
  if (infer.num () == 0)
    return;

  // Do not update the on-entry cache for block ending stmts.
  if (stmt_ends_bb_p (s))
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, gimple_bb (s)->succs)
	if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
	  break;
      if (e == NULL)
	update = false;
    }

  for (unsigned x = 0; x < infer.num (); x++)
    {
      tree name = infer.name (x);
      m_exit.add_range (name, bb, infer.range (x));
      if (update)
	{
	  if (!m_on_entry.get_bb_range (r, name, bb))
	    exit_range (r, name, bb, RFD_READ_ONLY);
	  if (r.intersect (infer.range (x)))
	    m_on_entry.set_bb_range (name, bb, r);
	  // If this range was invariant before, remove the invariance.
	  if (!m_gori.has_edge_range_p (name))
	    m_gori.set_range_invariant (name, false);
	}
    }
}