Fix typo in the changelog entry.
[official-gcc.git] / gcc / ira-build.c
blobf49591c6e53bb18e58161474ebd50c6bdd229f39
1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "df.h"
28 #include "tm_p.h"
29 #include "target.h"
30 #include "regs.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "diagnostic-core.h"
34 #include "params.h"
35 #include "reload.h"
36 #include "sparseset.h"
37 #include "cfgloop.h"
38 #include "ira.h"
39 #include "alloc-pool.h"
40 #include "ira-int.h"
41 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
43 static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
44 ira_loop_tree_node_t);
46 /* The root of the loop tree corresponding to the all function. */
47 ira_loop_tree_node_t ira_loop_tree_root;
49 /* Height of the loop tree. */
50 int ira_loop_tree_height;
52 /* All nodes representing basic blocks are referred through the
53 following array. We can not use basic block member `aux' for this
54 because it is used for insertion of insns on edges. */
55 ira_loop_tree_node_t ira_bb_nodes;
57 /* All nodes representing loops are referred through the following
58 array. */
59 ira_loop_tree_node_t ira_loop_nodes;
61 /* And size of the ira_loop_nodes array. */
62 unsigned int ira_loop_nodes_count;
64 /* Map regno -> allocnos with given regno (see comments for
65 allocno member `next_regno_allocno'). */
66 ira_allocno_t *ira_regno_allocno_map;
68 /* Array of references to all allocnos. The order number of the
69 allocno corresponds to the index in the array. Removed allocnos
70 have NULL element value. */
71 ira_allocno_t *ira_allocnos;
73 /* Sizes of the previous array. */
74 int ira_allocnos_num;
76 /* Count of conflict record structures we've created, used when creating
77 a new conflict id. */
78 int ira_objects_num;
80 /* Map a conflict id to its conflict record. */
81 ira_object_t *ira_object_id_map;
83 /* Array of references to all allocno preferences. The order number
84 of the preference corresponds to the index in the array. */
85 ira_pref_t *ira_prefs;
87 /* Size of the previous array. */
88 int ira_prefs_num;
90 /* Array of references to all copies. The order number of the copy
91 corresponds to the index in the array. Removed copies have NULL
92 element value. */
93 ira_copy_t *ira_copies;
95 /* Size of the previous array. */
96 int ira_copies_num;
100 /* LAST_BASIC_BLOCK before generating additional insns because of live
101 range splitting. Emitting insns on a critical edge creates a new
102 basic block. */
103 static int last_basic_block_before_change;
105 /* Initialize some members in loop tree node NODE. Use LOOP_NUM for
106 the member loop_num. */
107 static void
108 init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
110 int max_regno = max_reg_num ();
112 node->regno_allocno_map
113 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
114 memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
115 memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
116 node->all_allocnos = ira_allocate_bitmap ();
117 node->modified_regnos = ira_allocate_bitmap ();
118 node->border_allocnos = ira_allocate_bitmap ();
119 node->local_copies = ira_allocate_bitmap ();
120 node->loop_num = loop_num;
121 node->children = NULL;
122 node->subloops = NULL;
126 /* The following function allocates the loop tree nodes. If
127 CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
128 the root which corresponds the all function) will be not allocated
129 but nodes will still be allocated for basic blocks. */
130 static void
131 create_loop_tree_nodes (void)
133 unsigned int i, j;
134 bool skip_p;
135 edge_iterator ei;
136 edge e;
137 vec<edge> edges;
138 loop_p loop;
140 ira_bb_nodes
141 = ((struct ira_loop_tree_node *)
142 ira_allocate (sizeof (struct ira_loop_tree_node)
143 * last_basic_block_for_fn (cfun)));
144 last_basic_block_before_change = last_basic_block_for_fn (cfun);
145 for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
147 ira_bb_nodes[i].regno_allocno_map = NULL;
148 memset (ira_bb_nodes[i].reg_pressure, 0,
149 sizeof (ira_bb_nodes[i].reg_pressure));
150 ira_bb_nodes[i].all_allocnos = NULL;
151 ira_bb_nodes[i].modified_regnos = NULL;
152 ira_bb_nodes[i].border_allocnos = NULL;
153 ira_bb_nodes[i].local_copies = NULL;
155 if (current_loops == NULL)
157 ira_loop_nodes_count = 1;
158 ira_loop_nodes = ((struct ira_loop_tree_node *)
159 ira_allocate (sizeof (struct ira_loop_tree_node)));
160 init_loop_tree_node (ira_loop_nodes, 0);
161 return;
163 ira_loop_nodes_count = number_of_loops (cfun);
164 ira_loop_nodes = ((struct ira_loop_tree_node *)
165 ira_allocate (sizeof (struct ira_loop_tree_node)
166 * ira_loop_nodes_count));
167 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
169 if (loop_outer (loop) != NULL)
171 ira_loop_nodes[i].regno_allocno_map = NULL;
172 skip_p = false;
173 FOR_EACH_EDGE (e, ei, loop->header->preds)
174 if (e->src != loop->latch
175 && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
177 skip_p = true;
178 break;
180 if (skip_p)
181 continue;
182 edges = get_loop_exit_edges (loop);
183 FOR_EACH_VEC_ELT (edges, j, e)
184 if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
186 skip_p = true;
187 break;
189 edges.release ();
190 if (skip_p)
191 continue;
193 init_loop_tree_node (&ira_loop_nodes[i], loop->num);
197 /* The function returns TRUE if there are more one allocation
198 region. */
199 static bool
200 more_one_region_p (void)
202 unsigned int i;
203 loop_p loop;
205 if (current_loops != NULL)
206 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
207 if (ira_loop_nodes[i].regno_allocno_map != NULL
208 && ira_loop_tree_root != &ira_loop_nodes[i])
209 return true;
210 return false;
213 /* Free the loop tree node of a loop. */
214 static void
215 finish_loop_tree_node (ira_loop_tree_node_t loop)
217 if (loop->regno_allocno_map != NULL)
219 ira_assert (loop->bb == NULL);
220 ira_free_bitmap (loop->local_copies);
221 ira_free_bitmap (loop->border_allocnos);
222 ira_free_bitmap (loop->modified_regnos);
223 ira_free_bitmap (loop->all_allocnos);
224 ira_free (loop->regno_allocno_map);
225 loop->regno_allocno_map = NULL;
229 /* Free the loop tree nodes. */
230 static void
231 finish_loop_tree_nodes (void)
233 unsigned int i;
235 for (i = 0; i < ira_loop_nodes_count; i++)
236 finish_loop_tree_node (&ira_loop_nodes[i]);
237 ira_free (ira_loop_nodes);
238 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
240 if (ira_bb_nodes[i].local_copies != NULL)
241 ira_free_bitmap (ira_bb_nodes[i].local_copies);
242 if (ira_bb_nodes[i].border_allocnos != NULL)
243 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
244 if (ira_bb_nodes[i].modified_regnos != NULL)
245 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
246 if (ira_bb_nodes[i].all_allocnos != NULL)
247 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
248 if (ira_bb_nodes[i].regno_allocno_map != NULL)
249 ira_free (ira_bb_nodes[i].regno_allocno_map);
251 ira_free (ira_bb_nodes);
256 /* The following recursive function adds LOOP to the loop tree
257 hierarchy. LOOP is added only once. If LOOP is NULL we adding
258 loop designating the whole function when CFG loops are not
259 built. */
260 static void
261 add_loop_to_tree (struct loop *loop)
263 int loop_num;
264 struct loop *parent;
265 ira_loop_tree_node_t loop_node, parent_node;
267 /* We can not use loop node access macros here because of potential
268 checking and because the nodes are not initialized enough
269 yet. */
270 if (loop != NULL && loop_outer (loop) != NULL)
271 add_loop_to_tree (loop_outer (loop));
272 loop_num = loop != NULL ? loop->num : 0;
273 if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
274 && ira_loop_nodes[loop_num].children == NULL)
276 /* We have not added loop node to the tree yet. */
277 loop_node = &ira_loop_nodes[loop_num];
278 loop_node->loop = loop;
279 loop_node->bb = NULL;
280 if (loop == NULL)
281 parent = NULL;
282 else
284 for (parent = loop_outer (loop);
285 parent != NULL;
286 parent = loop_outer (parent))
287 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
288 break;
290 if (parent == NULL)
292 loop_node->next = NULL;
293 loop_node->subloop_next = NULL;
294 loop_node->parent = NULL;
296 else
298 parent_node = &ira_loop_nodes[parent->num];
299 loop_node->next = parent_node->children;
300 parent_node->children = loop_node;
301 loop_node->subloop_next = parent_node->subloops;
302 parent_node->subloops = loop_node;
303 loop_node->parent = parent_node;
308 /* The following recursive function sets up levels of nodes of the
309 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
310 The function returns maximal value of level in the tree + 1. */
311 static int
312 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
314 int height, max_height;
315 ira_loop_tree_node_t subloop_node;
317 ira_assert (loop_node->bb == NULL);
318 loop_node->level = level;
319 max_height = level + 1;
320 for (subloop_node = loop_node->subloops;
321 subloop_node != NULL;
322 subloop_node = subloop_node->subloop_next)
324 ira_assert (subloop_node->bb == NULL);
325 height = setup_loop_tree_level (subloop_node, level + 1);
326 if (height > max_height)
327 max_height = height;
329 return max_height;
332 /* Create the loop tree. The algorithm is designed to provide correct
333 order of loops (they are ordered by their last loop BB) and basic
334 blocks in the chain formed by member next. */
335 static void
336 form_loop_tree (void)
338 basic_block bb;
339 struct loop *parent;
340 ira_loop_tree_node_t bb_node, loop_node;
342 /* We can not use loop/bb node access macros because of potential
343 checking and because the nodes are not initialized enough
344 yet. */
345 FOR_EACH_BB_FN (bb, cfun)
347 bb_node = &ira_bb_nodes[bb->index];
348 bb_node->bb = bb;
349 bb_node->loop = NULL;
350 bb_node->subloops = NULL;
351 bb_node->children = NULL;
352 bb_node->subloop_next = NULL;
353 bb_node->next = NULL;
354 if (current_loops == NULL)
355 parent = NULL;
356 else
358 for (parent = bb->loop_father;
359 parent != NULL;
360 parent = loop_outer (parent))
361 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
362 break;
364 add_loop_to_tree (parent);
365 loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
366 bb_node->next = loop_node->children;
367 bb_node->parent = loop_node;
368 loop_node->children = bb_node;
370 ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
371 ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
372 ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
377 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
378 tree nodes. */
379 static void
380 rebuild_regno_allocno_maps (void)
382 unsigned int l;
383 int max_regno, regno;
384 ira_allocno_t a;
385 ira_loop_tree_node_t loop_tree_node;
386 loop_p loop;
387 ira_allocno_iterator ai;
389 ira_assert (current_loops != NULL);
390 max_regno = max_reg_num ();
391 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
392 if (ira_loop_nodes[l].regno_allocno_map != NULL)
394 ira_free (ira_loop_nodes[l].regno_allocno_map);
395 ira_loop_nodes[l].regno_allocno_map
396 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
397 * max_regno);
398 memset (ira_loop_nodes[l].regno_allocno_map, 0,
399 sizeof (ira_allocno_t) * max_regno);
401 ira_free (ira_regno_allocno_map);
402 ira_regno_allocno_map
403 = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
404 memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
405 FOR_EACH_ALLOCNO (a, ai)
407 if (ALLOCNO_CAP_MEMBER (a) != NULL)
408 /* Caps are not in the regno allocno maps. */
409 continue;
410 regno = ALLOCNO_REGNO (a);
411 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
412 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
413 ira_regno_allocno_map[regno] = a;
414 if (loop_tree_node->regno_allocno_map[regno] == NULL)
415 /* Remember that we can create temporary allocnos to break
416 cycles in register shuffle. */
417 loop_tree_node->regno_allocno_map[regno] = a;
422 /* Pools for allocnos, allocno live ranges and objects. */
423 static object_allocator<live_range> live_range_pool ("live ranges");
424 static object_allocator<ira_allocno> allocno_pool ("allocnos");
425 static object_allocator<ira_object> object_pool ("objects");
427 /* Vec containing references to all created allocnos. It is a
428 container of array allocnos. */
429 static vec<ira_allocno_t> allocno_vec;
431 /* Vec containing references to all created ira_objects. It is a
432 container of ira_object_id_map. */
433 static vec<ira_object_t> ira_object_id_map_vec;
435 /* Initialize data concerning allocnos. */
436 static void
437 initiate_allocnos (void)
439 allocno_vec.create (max_reg_num () * 2);
440 ira_allocnos = NULL;
441 ira_allocnos_num = 0;
442 ira_objects_num = 0;
443 ira_object_id_map_vec.create (max_reg_num () * 2);
444 ira_object_id_map = NULL;
445 ira_regno_allocno_map
446 = (ira_allocno_t *) ira_allocate (max_reg_num ()
447 * sizeof (ira_allocno_t));
448 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
451 /* Create and return an object corresponding to a new allocno A. */
452 static ira_object_t
453 ira_create_object (ira_allocno_t a, int subword)
455 enum reg_class aclass = ALLOCNO_CLASS (a);
456 ira_object_t obj = object_pool.allocate ();
458 OBJECT_ALLOCNO (obj) = a;
459 OBJECT_SUBWORD (obj) = subword;
460 OBJECT_CONFLICT_ID (obj) = ira_objects_num;
461 OBJECT_CONFLICT_VEC_P (obj) = false;
462 OBJECT_CONFLICT_ARRAY (obj) = NULL;
463 OBJECT_NUM_CONFLICTS (obj) = 0;
464 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
465 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
466 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
467 reg_class_contents[aclass]);
468 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
469 reg_class_contents[aclass]);
470 OBJECT_MIN (obj) = INT_MAX;
471 OBJECT_MAX (obj) = -1;
472 OBJECT_LIVE_RANGES (obj) = NULL;
474 ira_object_id_map_vec.safe_push (obj);
475 ira_object_id_map
476 = ira_object_id_map_vec.address ();
477 ira_objects_num = ira_object_id_map_vec.length ();
479 return obj;
482 /* Create and return the allocno corresponding to REGNO in
483 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
484 same regno if CAP_P is FALSE. */
485 ira_allocno_t
486 ira_create_allocno (int regno, bool cap_p,
487 ira_loop_tree_node_t loop_tree_node)
489 ira_allocno_t a;
491 a = allocno_pool.allocate ();
492 ALLOCNO_REGNO (a) = regno;
493 ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
494 if (! cap_p)
496 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
497 ira_regno_allocno_map[regno] = a;
498 if (loop_tree_node->regno_allocno_map[regno] == NULL)
499 /* Remember that we can create temporary allocnos to break
500 cycles in register shuffle on region borders (see
501 ira-emit.c). */
502 loop_tree_node->regno_allocno_map[regno] = a;
504 ALLOCNO_CAP (a) = NULL;
505 ALLOCNO_CAP_MEMBER (a) = NULL;
506 ALLOCNO_NUM (a) = ira_allocnos_num;
507 bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
508 ALLOCNO_NREFS (a) = 0;
509 ALLOCNO_FREQ (a) = 0;
510 ALLOCNO_HARD_REGNO (a) = -1;
511 ALLOCNO_CALL_FREQ (a) = 0;
512 ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
513 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
514 CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
515 #ifdef STACK_REGS
516 ALLOCNO_NO_STACK_REG_P (a) = false;
517 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
518 #endif
519 ALLOCNO_DONT_REASSIGN_P (a) = false;
520 ALLOCNO_BAD_SPILL_P (a) = false;
521 ALLOCNO_ASSIGNED_P (a) = false;
522 ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
523 ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
524 ALLOCNO_PREFS (a) = NULL;
525 ALLOCNO_COPIES (a) = NULL;
526 ALLOCNO_HARD_REG_COSTS (a) = NULL;
527 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
528 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
529 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
530 ALLOCNO_CLASS (a) = NO_REGS;
531 ALLOCNO_UPDATED_CLASS_COST (a) = 0;
532 ALLOCNO_CLASS_COST (a) = 0;
533 ALLOCNO_MEMORY_COST (a) = 0;
534 ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
535 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
536 ALLOCNO_NUM_OBJECTS (a) = 0;
538 ALLOCNO_ADD_DATA (a) = NULL;
539 allocno_vec.safe_push (a);
540 ira_allocnos = allocno_vec.address ();
541 ira_allocnos_num = allocno_vec.length ();
543 return a;
546 /* Set up register class for A and update its conflict hard
547 registers. */
548 void
549 ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
551 ira_allocno_object_iterator oi;
552 ira_object_t obj;
554 ALLOCNO_CLASS (a) = aclass;
555 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
557 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
558 reg_class_contents[aclass]);
559 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
560 reg_class_contents[aclass]);
564 /* Determine the number of objects we should associate with allocno A
565 and allocate them. */
566 void
567 ira_create_allocno_objects (ira_allocno_t a)
569 machine_mode mode = ALLOCNO_MODE (a);
570 enum reg_class aclass = ALLOCNO_CLASS (a);
571 int n = ira_reg_class_max_nregs[aclass][mode];
572 int i;
574 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
575 n = 1;
577 ALLOCNO_NUM_OBJECTS (a) = n;
578 for (i = 0; i < n; i++)
579 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
582 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
583 ALLOCNO_OBJECT structures. This must be called after the allocno
584 classes are known. */
585 static void
586 create_allocno_objects (void)
588 ira_allocno_t a;
589 ira_allocno_iterator ai;
591 FOR_EACH_ALLOCNO (a, ai)
592 ira_create_allocno_objects (a);
595 /* Merge hard register conflict information for all objects associated with
596 allocno TO into the corresponding objects associated with FROM.
597 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
598 static void
599 merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
600 bool total_only)
602 int i;
603 gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
604 for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
606 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
607 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
609 if (!total_only)
610 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
611 OBJECT_CONFLICT_HARD_REGS (from_obj));
612 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
613 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
615 #ifdef STACK_REGS
616 if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
617 ALLOCNO_NO_STACK_REG_P (to) = true;
618 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
619 ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
620 #endif
623 /* Update hard register conflict information for all objects associated with
624 A to include the regs in SET. */
625 void
626 ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
628 ira_allocno_object_iterator i;
629 ira_object_t obj;
631 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
633 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
634 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
638 /* Return TRUE if a conflict vector with NUM elements is more
639 profitable than a conflict bit vector for OBJ. */
640 bool
641 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
643 int nw;
644 int max = OBJECT_MAX (obj);
645 int min = OBJECT_MIN (obj);
647 if (max < min)
648 /* We prefer a bit vector in such case because it does not result
649 in allocation. */
650 return false;
652 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
653 return (2 * sizeof (ira_object_t) * (num + 1)
654 < 3 * nw * sizeof (IRA_INT_TYPE));
657 /* Allocates and initialize the conflict vector of OBJ for NUM
658 conflicting objects. */
659 void
660 ira_allocate_conflict_vec (ira_object_t obj, int num)
662 int size;
663 ira_object_t *vec;
665 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
666 num++; /* for NULL end marker */
667 size = sizeof (ira_object_t) * num;
668 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
669 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
670 vec[0] = NULL;
671 OBJECT_NUM_CONFLICTS (obj) = 0;
672 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
673 OBJECT_CONFLICT_VEC_P (obj) = true;
676 /* Allocate and initialize the conflict bit vector of OBJ. */
677 static void
678 allocate_conflict_bit_vec (ira_object_t obj)
680 unsigned int size;
682 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
683 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
684 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
685 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
686 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
687 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
688 OBJECT_CONFLICT_VEC_P (obj) = false;
691 /* Allocate and initialize the conflict vector or conflict bit vector
692 of OBJ for NUM conflicting allocnos whatever is more profitable. */
693 void
694 ira_allocate_object_conflicts (ira_object_t obj, int num)
696 if (ira_conflict_vector_profitable_p (obj, num))
697 ira_allocate_conflict_vec (obj, num);
698 else
699 allocate_conflict_bit_vec (obj);
702 /* Add OBJ2 to the conflicts of OBJ1. */
703 static void
704 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
706 int num;
707 unsigned int size;
709 if (OBJECT_CONFLICT_VEC_P (obj1))
711 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
712 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
713 num = curr_num + 2;
714 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
716 ira_object_t *newvec;
717 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
718 newvec = (ira_object_t *) ira_allocate (size);
719 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
720 ira_free (vec);
721 vec = newvec;
722 OBJECT_CONFLICT_ARRAY (obj1) = vec;
723 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
725 vec[num - 2] = obj2;
726 vec[num - 1] = NULL;
727 OBJECT_NUM_CONFLICTS (obj1)++;
729 else
731 int nw, added_head_nw, id;
732 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
734 id = OBJECT_CONFLICT_ID (obj2);
735 if (OBJECT_MIN (obj1) > id)
737 /* Expand head of the bit vector. */
738 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
739 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
740 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
741 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
743 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
744 vec, nw * sizeof (IRA_INT_TYPE));
745 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
747 else
749 size
750 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
751 vec = (IRA_INT_TYPE *) ira_allocate (size);
752 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
753 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
754 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
755 memset ((char *) vec
756 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
757 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
758 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
759 OBJECT_CONFLICT_ARRAY (obj1) = vec;
760 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
762 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
764 else if (OBJECT_MAX (obj1) < id)
766 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
767 size = nw * sizeof (IRA_INT_TYPE);
768 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
770 /* Expand tail of the bit vector. */
771 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
772 vec = (IRA_INT_TYPE *) ira_allocate (size);
773 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
774 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
775 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
776 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
777 OBJECT_CONFLICT_ARRAY (obj1) = vec;
778 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
780 OBJECT_MAX (obj1) = id;
782 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
786 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
787 static void
788 ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
790 add_to_conflicts (obj1, obj2);
791 add_to_conflicts (obj2, obj1);
794 /* Clear all conflicts of OBJ. */
795 static void
796 clear_conflicts (ira_object_t obj)
798 if (OBJECT_CONFLICT_VEC_P (obj))
800 OBJECT_NUM_CONFLICTS (obj) = 0;
801 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
803 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
805 int nw;
807 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
808 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
820 /* Remove duplications in conflict vector of OBJ. */
821 static void
822 compress_conflict_vec (ira_object_t obj)
824 ira_object_t *vec, conflict_obj;
825 int i, j;
827 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
828 vec = OBJECT_CONFLICT_VEC (obj);
829 curr_conflict_check_tick++;
830 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
832 int id = OBJECT_CONFLICT_ID (conflict_obj);
833 if (conflict_check[id] != curr_conflict_check_tick)
835 conflict_check[id] = curr_conflict_check_tick;
836 vec[j++] = conflict_obj;
839 OBJECT_NUM_CONFLICTS (obj) = j;
840 vec[j] = NULL;
843 /* Remove duplications in conflict vectors of all allocnos. */
844 static void
845 compress_conflict_vecs (void)
847 ira_object_t obj;
848 ira_object_iterator oi;
850 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
851 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
852 curr_conflict_check_tick = 0;
853 FOR_EACH_OBJECT (obj, oi)
855 if (OBJECT_CONFLICT_VEC_P (obj))
856 compress_conflict_vec (obj);
858 ira_free (conflict_check);
861 /* This recursive function outputs allocno A and if it is a cap the
862 function outputs its members. */
863 void
864 ira_print_expanded_allocno (ira_allocno_t a)
866 basic_block bb;
868 fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
869 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
870 fprintf (ira_dump_file, ",b%d", bb->index);
871 else
872 fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
873 if (ALLOCNO_CAP_MEMBER (a) != NULL)
875 fprintf (ira_dump_file, ":");
876 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
878 fprintf (ira_dump_file, ")");
881 /* Create and return the cap representing allocno A in the
882 parent loop. */
883 static ira_allocno_t
884 create_cap_allocno (ira_allocno_t a)
886 ira_allocno_t cap;
887 ira_loop_tree_node_t parent;
888 enum reg_class aclass;
890 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
891 cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
892 ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
893 ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
894 aclass = ALLOCNO_CLASS (a);
895 ira_set_allocno_class (cap, aclass);
896 ira_create_allocno_objects (cap);
897 ALLOCNO_CAP_MEMBER (cap) = a;
898 ALLOCNO_CAP (a) = cap;
899 ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
900 ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
901 ira_allocate_and_copy_costs
902 (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
903 ira_allocate_and_copy_costs
904 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
905 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
906 ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
907 ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
908 ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
909 ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
911 merge_hard_reg_conflicts (a, cap, false);
913 ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
914 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
915 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
916 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
917 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
919 fprintf (ira_dump_file, " Creating cap ");
920 ira_print_expanded_allocno (cap);
921 fprintf (ira_dump_file, "\n");
923 return cap;
926 /* Create and return a live range for OBJECT with given attributes. */
927 live_range_t
928 ira_create_live_range (ira_object_t obj, int start, int finish,
929 live_range_t next)
931 live_range_t p;
933 p = live_range_pool.allocate ();
934 p->object = obj;
935 p->start = start;
936 p->finish = finish;
937 p->next = next;
938 return p;
941 /* Create a new live range for OBJECT and queue it at the head of its
942 live range list. */
943 void
944 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
946 live_range_t p;
947 p = ira_create_live_range (object, start, finish,
948 OBJECT_LIVE_RANGES (object));
949 OBJECT_LIVE_RANGES (object) = p;
952 /* Copy allocno live range R and return the result. */
953 static live_range_t
954 copy_live_range (live_range_t r)
956 live_range_t p;
958 p = live_range_pool.allocate ();
959 *p = *r;
960 return p;
963 /* Copy allocno live range list given by its head R and return the
964 result. */
965 live_range_t
966 ira_copy_live_range_list (live_range_t r)
968 live_range_t p, first, last;
970 if (r == NULL)
971 return NULL;
972 for (first = last = NULL; r != NULL; r = r->next)
974 p = copy_live_range (r);
975 if (first == NULL)
976 first = p;
977 else
978 last->next = p;
979 last = p;
981 return first;
984 /* Merge ranges R1 and R2 and returns the result. The function
985 maintains the order of ranges and tries to minimize number of the
986 result ranges. */
987 live_range_t
988 ira_merge_live_ranges (live_range_t r1, live_range_t r2)
990 live_range_t first, last;
992 if (r1 == NULL)
993 return r2;
994 if (r2 == NULL)
995 return r1;
996 for (first = last = NULL; r1 != NULL && r2 != NULL;)
998 if (r1->start < r2->start)
999 std::swap (r1, r2);
1000 if (r1->start <= r2->finish + 1)
1002 /* Intersected ranges: merge r1 and r2 into r1. */
1003 r1->start = r2->start;
1004 if (r1->finish < r2->finish)
1005 r1->finish = r2->finish;
1006 live_range_t temp = r2;
1007 r2 = r2->next;
1008 ira_finish_live_range (temp);
1009 if (r2 == NULL)
1011 /* To try to merge with subsequent ranges in r1. */
1012 r2 = r1->next;
1013 r1->next = NULL;
1016 else
1018 /* Add r1 to the result. */
1019 if (first == NULL)
1020 first = last = r1;
1021 else
1023 last->next = r1;
1024 last = r1;
1026 r1 = r1->next;
1027 if (r1 == NULL)
1029 /* To try to merge with subsequent ranges in r2. */
1030 r1 = r2->next;
1031 r2->next = NULL;
1035 if (r1 != NULL)
1037 if (first == NULL)
1038 first = r1;
1039 else
1040 last->next = r1;
1041 ira_assert (r1->next == NULL);
1043 else if (r2 != NULL)
1045 if (first == NULL)
1046 first = r2;
1047 else
1048 last->next = r2;
1049 ira_assert (r2->next == NULL);
1051 else
1053 ira_assert (last->next == NULL);
1055 return first;
1058 /* Return TRUE if live ranges R1 and R2 intersect. */
1059 bool
1060 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1062 /* Remember the live ranges are always kept ordered. */
1063 while (r1 != NULL && r2 != NULL)
1065 if (r1->start > r2->finish)
1066 r1 = r1->next;
1067 else if (r2->start > r1->finish)
1068 r2 = r2->next;
1069 else
1070 return true;
1072 return false;
1075 /* Free allocno live range R. */
1076 void
1077 ira_finish_live_range (live_range_t r)
1079 live_range_pool.remove (r);
1082 /* Free list of allocno live ranges starting with R. */
1083 void
1084 ira_finish_live_range_list (live_range_t r)
1086 live_range_t next_r;
1088 for (; r != NULL; r = next_r)
1090 next_r = r->next;
1091 ira_finish_live_range (r);
1095 /* Free updated register costs of allocno A. */
1096 void
1097 ira_free_allocno_updated_costs (ira_allocno_t a)
1099 enum reg_class aclass;
1101 aclass = ALLOCNO_CLASS (a);
1102 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1103 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1104 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1105 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1106 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1107 aclass);
1108 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1111 /* Free and nullify all cost vectors allocated earlier for allocno
1112 A. */
1113 static void
1114 ira_free_allocno_costs (ira_allocno_t a)
1116 enum reg_class aclass = ALLOCNO_CLASS (a);
1117 ira_object_t obj;
1118 ira_allocno_object_iterator oi;
1120 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
1122 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
1123 ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
1124 if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
1125 ira_free (OBJECT_CONFLICT_ARRAY (obj));
1126 object_pool.remove (obj);
1129 ira_allocnos[ALLOCNO_NUM (a)] = NULL;
1130 if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1131 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
1132 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
1133 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
1134 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1135 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1136 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1137 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1138 aclass);
1139 ALLOCNO_HARD_REG_COSTS (a) = NULL;
1140 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
1141 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1142 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1145 /* Free the memory allocated for allocno A. */
1146 static void
1147 finish_allocno (ira_allocno_t a)
1149 ira_free_allocno_costs (a);
1150 allocno_pool.remove (a);
1153 /* Free the memory allocated for all allocnos. */
1154 static void
1155 finish_allocnos (void)
1157 ira_allocno_t a;
1158 ira_allocno_iterator ai;
1160 FOR_EACH_ALLOCNO (a, ai)
1161 finish_allocno (a);
1162 ira_free (ira_regno_allocno_map);
1163 ira_object_id_map_vec.release ();
1164 allocno_vec.release ();
1165 allocno_pool.release ();
1166 object_pool.release ();
1167 live_range_pool.release ();
1172 /* Pools for allocno preferences. */
1173 static object_allocator <ira_allocno_pref> pref_pool ("prefs");
1175 /* Vec containing references to all created preferences. It is a
1176 container of array ira_prefs. */
1177 static vec<ira_pref_t> pref_vec;
1179 /* The function initializes data concerning allocno prefs. */
1180 static void
1181 initiate_prefs (void)
1183 pref_vec.create (get_max_uid ());
1184 ira_prefs = NULL;
1185 ira_prefs_num = 0;
1188 /* Return pref for A and HARD_REGNO if any. */
1189 static ira_pref_t
1190 find_allocno_pref (ira_allocno_t a, int hard_regno)
1192 ira_pref_t pref;
1194 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1195 if (pref->allocno == a && pref->hard_regno == hard_regno)
1196 return pref;
1197 return NULL;
1200 /* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1201 ira_pref_t
1202 ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
1204 ira_pref_t pref;
1206 pref = pref_pool.allocate ();
1207 pref->num = ira_prefs_num;
1208 pref->allocno = a;
1209 pref->hard_regno = hard_regno;
1210 pref->freq = freq;
1211 pref_vec.safe_push (pref);
1212 ira_prefs = pref_vec.address ();
1213 ira_prefs_num = pref_vec.length ();
1214 return pref;
1217 /* Attach a pref PREF to the corresponding allocno. */
1218 static void
1219 add_allocno_pref_to_list (ira_pref_t pref)
1221 ira_allocno_t a = pref->allocno;
1223 pref->next_pref = ALLOCNO_PREFS (a);
1224 ALLOCNO_PREFS (a) = pref;
1227 /* Create (or update frequency if the pref already exists) the pref of
1228 allocnos A preferring HARD_REGNO with frequency FREQ. */
1229 void
1230 ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
1232 ira_pref_t pref;
1234 if (freq <= 0)
1235 return;
1236 if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
1238 pref->freq += freq;
1239 return;
1241 pref = ira_create_pref (a, hard_regno, freq);
1242 ira_assert (a != NULL);
1243 add_allocno_pref_to_list (pref);
1246 /* Print info about PREF into file F. */
1247 static void
1248 print_pref (FILE *f, ira_pref_t pref)
1250 fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
1251 ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
1252 pref->hard_regno, pref->freq);
1255 /* Print info about PREF into stderr. */
1256 void
1257 ira_debug_pref (ira_pref_t pref)
1259 print_pref (stderr, pref);
1262 /* Print info about all prefs into file F. */
1263 static void
1264 print_prefs (FILE *f)
1266 ira_pref_t pref;
1267 ira_pref_iterator pi;
1269 FOR_EACH_PREF (pref, pi)
1270 print_pref (f, pref);
1273 /* Print info about all prefs into stderr. */
1274 void
1275 ira_debug_prefs (void)
1277 print_prefs (stderr);
1280 /* Print info about prefs involving allocno A into file F. */
1281 static void
1282 print_allocno_prefs (FILE *f, ira_allocno_t a)
1284 ira_pref_t pref;
1286 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1287 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1288 fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
1289 fprintf (f, "\n");
1292 /* Print info about prefs involving allocno A into stderr. */
1293 void
1294 ira_debug_allocno_prefs (ira_allocno_t a)
1296 print_allocno_prefs (stderr, a);
1299 /* The function frees memory allocated for PREF. */
1300 static void
1301 finish_pref (ira_pref_t pref)
1303 ira_prefs[pref->num] = NULL;
1304 pref_pool.remove (pref);
1307 /* Remove PREF from the list of allocno prefs and free memory for
1308 it. */
1309 void
1310 ira_remove_pref (ira_pref_t pref)
1312 ira_pref_t cpref, prev;
1314 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1315 fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
1316 pref->num, pref->hard_regno, pref->freq);
1317 for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
1318 cpref != NULL;
1319 prev = cpref, cpref = cpref->next_pref)
1320 if (cpref == pref)
1321 break;
1322 ira_assert (cpref != NULL);
1323 if (prev == NULL)
1324 ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
1325 else
1326 prev->next_pref = pref->next_pref;
1327 finish_pref (pref);
1330 /* Remove all prefs of allocno A. */
1331 void
1332 ira_remove_allocno_prefs (ira_allocno_t a)
1334 ira_pref_t pref, next_pref;
1336 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
1338 next_pref = pref->next_pref;
1339 finish_pref (pref);
1341 ALLOCNO_PREFS (a) = NULL;
1344 /* Free memory allocated for all prefs. */
1345 static void
1346 finish_prefs (void)
1348 ira_pref_t pref;
1349 ira_pref_iterator pi;
1351 FOR_EACH_PREF (pref, pi)
1352 finish_pref (pref);
1353 pref_vec.release ();
1354 pref_pool.release ();
1359 /* Pools for copies. */
1360 static object_allocator<ira_allocno_copy> copy_pool ("copies");
1362 /* Vec containing references to all created copies. It is a
1363 container of array ira_copies. */
1364 static vec<ira_copy_t> copy_vec;
1366 /* The function initializes data concerning allocno copies. */
1367 static void
1368 initiate_copies (void)
1370 copy_vec.create (get_max_uid ());
1371 ira_copies = NULL;
1372 ira_copies_num = 0;
1375 /* Return copy connecting A1 and A2 and originated from INSN of
1376 LOOP_TREE_NODE if any. */
1377 static ira_copy_t
1378 find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
1379 ira_loop_tree_node_t loop_tree_node)
1381 ira_copy_t cp, next_cp;
1382 ira_allocno_t another_a;
1384 for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1386 if (cp->first == a1)
1388 next_cp = cp->next_first_allocno_copy;
1389 another_a = cp->second;
1391 else if (cp->second == a1)
1393 next_cp = cp->next_second_allocno_copy;
1394 another_a = cp->first;
1396 else
1397 gcc_unreachable ();
1398 if (another_a == a2 && cp->insn == insn
1399 && cp->loop_tree_node == loop_tree_node)
1400 return cp;
1402 return NULL;
1405 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1406 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1407 ira_copy_t
1408 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1409 bool constraint_p, rtx_insn *insn,
1410 ira_loop_tree_node_t loop_tree_node)
1412 ira_copy_t cp;
1414 cp = copy_pool.allocate ();
1415 cp->num = ira_copies_num;
1416 cp->first = first;
1417 cp->second = second;
1418 cp->freq = freq;
1419 cp->constraint_p = constraint_p;
1420 cp->insn = insn;
1421 cp->loop_tree_node = loop_tree_node;
1422 copy_vec.safe_push (cp);
1423 ira_copies = copy_vec.address ();
1424 ira_copies_num = copy_vec.length ();
1425 return cp;
1428 /* Attach a copy CP to allocnos involved into the copy. */
1429 static void
1430 add_allocno_copy_to_list (ira_copy_t cp)
1432 ira_allocno_t first = cp->first, second = cp->second;
1434 cp->prev_first_allocno_copy = NULL;
1435 cp->prev_second_allocno_copy = NULL;
1436 cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
1437 if (cp->next_first_allocno_copy != NULL)
1439 if (cp->next_first_allocno_copy->first == first)
1440 cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
1441 else
1442 cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
1444 cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
1445 if (cp->next_second_allocno_copy != NULL)
1447 if (cp->next_second_allocno_copy->second == second)
1448 cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
1449 else
1450 cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
1452 ALLOCNO_COPIES (first) = cp;
1453 ALLOCNO_COPIES (second) = cp;
1456 /* Make a copy CP a canonical copy where number of the
1457 first allocno is less than the second one. */
1458 static void
1459 swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1461 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1462 return;
1464 std::swap (cp->first, cp->second);
1465 std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
1466 std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
1469 /* Create (or update frequency if the copy already exists) and return
1470 the copy of allocnos FIRST and SECOND with frequency FREQ
1471 corresponding to move insn INSN (if any) and originated from
1472 LOOP_TREE_NODE. */
1473 ira_copy_t
1474 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1475 bool constraint_p, rtx_insn *insn,
1476 ira_loop_tree_node_t loop_tree_node)
1478 ira_copy_t cp;
1480 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1482 cp->freq += freq;
1483 return cp;
1485 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1486 loop_tree_node);
1487 ira_assert (first != NULL && second != NULL);
1488 add_allocno_copy_to_list (cp);
1489 swap_allocno_copy_ends_if_necessary (cp);
1490 return cp;
1493 /* Print info about copy CP into file F. */
1494 static void
1495 print_copy (FILE *f, ira_copy_t cp)
1497 fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
1498 ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1499 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1500 cp->insn != NULL
1501 ? "move" : cp->constraint_p ? "constraint" : "shuffle");
1504 DEBUG_FUNCTION void
1505 debug (ira_allocno_copy &ref)
1507 print_copy (stderr, &ref);
1510 DEBUG_FUNCTION void
1511 debug (ira_allocno_copy *ptr)
1513 if (ptr)
1514 debug (*ptr);
1515 else
1516 fprintf (stderr, "<nil>\n");
1519 /* Print info about copy CP into stderr. */
1520 void
1521 ira_debug_copy (ira_copy_t cp)
1523 print_copy (stderr, cp);
1526 /* Print info about all copies into file F. */
1527 static void
1528 print_copies (FILE *f)
1530 ira_copy_t cp;
1531 ira_copy_iterator ci;
1533 FOR_EACH_COPY (cp, ci)
1534 print_copy (f, cp);
1537 /* Print info about all copies into stderr. */
1538 void
1539 ira_debug_copies (void)
1541 print_copies (stderr);
1544 /* Print info about copies involving allocno A into file F. */
1545 static void
1546 print_allocno_copies (FILE *f, ira_allocno_t a)
1548 ira_allocno_t another_a;
1549 ira_copy_t cp, next_cp;
1551 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1552 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1554 if (cp->first == a)
1556 next_cp = cp->next_first_allocno_copy;
1557 another_a = cp->second;
1559 else if (cp->second == a)
1561 next_cp = cp->next_second_allocno_copy;
1562 another_a = cp->first;
1564 else
1565 gcc_unreachable ();
1566 fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
1567 ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
1569 fprintf (f, "\n");
1572 DEBUG_FUNCTION void
1573 debug (ira_allocno &ref)
1575 print_allocno_copies (stderr, &ref);
1578 DEBUG_FUNCTION void
1579 debug (ira_allocno *ptr)
1581 if (ptr)
1582 debug (*ptr);
1583 else
1584 fprintf (stderr, "<nil>\n");
1588 /* Print info about copies involving allocno A into stderr. */
1589 void
1590 ira_debug_allocno_copies (ira_allocno_t a)
1592 print_allocno_copies (stderr, a);
1595 /* The function frees memory allocated for copy CP. */
1596 static void
1597 finish_copy (ira_copy_t cp)
1599 copy_pool.remove (cp);
1603 /* Free memory allocated for all copies. */
1604 static void
1605 finish_copies (void)
1607 ira_copy_t cp;
1608 ira_copy_iterator ci;
1610 FOR_EACH_COPY (cp, ci)
1611 finish_copy (cp);
1612 copy_vec.release ();
1613 copy_pool.release ();
1618 /* Pools for cost vectors. It is defined only for allocno classes. */
1619 static pool_allocator *cost_vector_pool[N_REG_CLASSES];
1621 /* The function initiates work with hard register cost vectors. It
1622 creates allocation pool for each allocno class. */
1623 static void
1624 initiate_cost_vectors (void)
1626 int i;
1627 enum reg_class aclass;
1629 for (i = 0; i < ira_allocno_classes_num; i++)
1631 aclass = ira_allocno_classes[i];
1632 cost_vector_pool[aclass] = new pool_allocator
1633 ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
1637 /* Allocate and return a cost vector VEC for ACLASS. */
1638 int *
1639 ira_allocate_cost_vector (reg_class_t aclass)
1641 return (int*) cost_vector_pool[(int) aclass]->allocate ();
1644 /* Free a cost vector VEC for ACLASS. */
1645 void
1646 ira_free_cost_vector (int *vec, reg_class_t aclass)
1648 ira_assert (vec != NULL);
1649 cost_vector_pool[(int) aclass]->remove (vec);
1652 /* Finish work with hard register cost vectors. Release allocation
1653 pool for each allocno class. */
1654 static void
1655 finish_cost_vectors (void)
1657 int i;
1658 enum reg_class aclass;
1660 for (i = 0; i < ira_allocno_classes_num; i++)
1662 aclass = ira_allocno_classes[i];
1663 delete cost_vector_pool[aclass];
1669 /* Compute a post-ordering of the reverse control flow of the loop body
1670 designated by the children nodes of LOOP_NODE, whose body nodes in
1671 pre-order are input as LOOP_PREORDER. Return a VEC with a post-order
1672 of the reverse loop body.
1674 For the post-order of the reverse CFG, we visit the basic blocks in
1675 LOOP_PREORDER array in the reverse order of where they appear.
1676 This is important: We do not just want to compute a post-order of
1677 the reverse CFG, we want to make a best-guess for a visiting order that
1678 minimizes the number of chain elements per allocno live range. If the
1679 blocks would be visited in a different order, we would still compute a
1680 correct post-ordering but it would be less likely that two nodes
1681 connected by an edge in the CFG are neighbours in the topsort. */
1683 static vec<ira_loop_tree_node_t>
1684 ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
1685 vec<ira_loop_tree_node_t> loop_preorder)
1687 vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
1688 unsigned int n_loop_preorder;
1690 n_loop_preorder = loop_preorder.length ();
1691 if (n_loop_preorder != 0)
1693 ira_loop_tree_node_t subloop_node;
1694 unsigned int i;
1695 auto_vec<ira_loop_tree_node_t> dfs_stack;
1697 /* This is a bit of strange abuse of the BB_VISITED flag: We use
1698 the flag to mark blocks we still have to visit to add them to
1699 our post-order. Define an alias to avoid confusion. */
1700 #define BB_TO_VISIT BB_VISITED
1702 FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
1704 gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
1705 subloop_node->bb->flags |= BB_TO_VISIT;
1708 topsort_nodes.create (n_loop_preorder);
1709 dfs_stack.create (n_loop_preorder);
1711 FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
1713 if (! (subloop_node->bb->flags & BB_TO_VISIT))
1714 continue;
1716 subloop_node->bb->flags &= ~BB_TO_VISIT;
1717 dfs_stack.quick_push (subloop_node);
1718 while (! dfs_stack.is_empty ())
1720 edge e;
1721 edge_iterator ei;
1723 ira_loop_tree_node_t n = dfs_stack.last ();
1724 FOR_EACH_EDGE (e, ei, n->bb->preds)
1726 ira_loop_tree_node_t pred_node;
1727 basic_block pred_bb = e->src;
1729 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1730 continue;
1732 pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
1733 if (pred_node != n
1734 && (pred_node->bb->flags & BB_TO_VISIT))
1736 pred_node->bb->flags &= ~BB_TO_VISIT;
1737 dfs_stack.quick_push (pred_node);
1740 if (n == dfs_stack.last ())
1742 dfs_stack.pop ();
1743 topsort_nodes.quick_push (n);
1748 #undef BB_TO_VISIT
1751 gcc_assert (topsort_nodes.length () == n_loop_preorder);
1752 return topsort_nodes;
1755 /* The current loop tree node and its regno allocno map. */
1756 ira_loop_tree_node_t ira_curr_loop_tree_node;
1757 ira_allocno_t *ira_curr_regno_allocno_map;
1759 /* This recursive function traverses loop tree with root LOOP_NODE
1760 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1761 correspondingly in preorder and postorder. The function sets up
1762 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1763 basic block nodes of LOOP_NODE is also processed (before its
1764 subloop nodes).
1766 If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1767 the loop are passed in the *reverse* post-order of the *reverse*
1768 CFG. This is only used by ira_create_allocno_live_ranges, which
1769 wants to visit basic blocks in this order to minimize the number
1770 of elements per live range chain.
1771 Note that the loop tree nodes are still visited in the normal,
1772 forward post-order of the loop tree. */
1774 void
1775 ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
1776 void (*preorder_func) (ira_loop_tree_node_t),
1777 void (*postorder_func) (ira_loop_tree_node_t))
1779 ira_loop_tree_node_t subloop_node;
1781 ira_assert (loop_node->bb == NULL);
1782 ira_curr_loop_tree_node = loop_node;
1783 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1785 if (preorder_func != NULL)
1786 (*preorder_func) (loop_node);
1788 if (bb_p)
1790 auto_vec<ira_loop_tree_node_t> loop_preorder;
1791 unsigned int i;
1793 /* Add all nodes to the set of nodes to visit. The IRA loop tree
1794 is set up such that nodes in the loop body appear in a pre-order
1795 of their place in the CFG. */
1796 for (subloop_node = loop_node->children;
1797 subloop_node != NULL;
1798 subloop_node = subloop_node->next)
1799 if (subloop_node->bb != NULL)
1800 loop_preorder.safe_push (subloop_node);
1802 if (preorder_func != NULL)
1803 FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
1804 (*preorder_func) (subloop_node);
1806 if (postorder_func != NULL)
1808 vec<ira_loop_tree_node_t> loop_rev_postorder =
1809 ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
1810 FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
1811 (*postorder_func) (subloop_node);
1812 loop_rev_postorder.release ();
1816 for (subloop_node = loop_node->subloops;
1817 subloop_node != NULL;
1818 subloop_node = subloop_node->subloop_next)
1820 ira_assert (subloop_node->bb == NULL);
1821 ira_traverse_loop_tree (bb_p, subloop_node,
1822 preorder_func, postorder_func);
1825 ira_curr_loop_tree_node = loop_node;
1826 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1828 if (postorder_func != NULL)
1829 (*postorder_func) (loop_node);
1834 /* The basic block currently being processed. */
1835 static basic_block curr_bb;
1837 /* This recursive function creates allocnos corresponding to
1838 pseudo-registers containing in X. True OUTPUT_P means that X is
1839 an lvalue. PARENT corresponds to the parent expression of X. */
1840 static void
1841 create_insn_allocnos (rtx x, rtx outer, bool output_p)
1843 int i, j;
1844 const char *fmt;
1845 enum rtx_code code = GET_CODE (x);
1847 if (code == REG)
1849 int regno;
1851 if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1853 ira_allocno_t a;
1855 if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1857 a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1858 if (outer != NULL && GET_CODE (outer) == SUBREG)
1860 machine_mode wmode = GET_MODE (outer);
1861 if (GET_MODE_SIZE (wmode) > GET_MODE_SIZE (ALLOCNO_WMODE (a)))
1862 ALLOCNO_WMODE (a) = wmode;
1866 ALLOCNO_NREFS (a)++;
1867 ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1868 if (output_p)
1869 bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1871 return;
1873 else if (code == SET)
1875 create_insn_allocnos (SET_DEST (x), NULL, true);
1876 create_insn_allocnos (SET_SRC (x), NULL, false);
1877 return;
1879 else if (code == CLOBBER)
1881 create_insn_allocnos (XEXP (x, 0), NULL, true);
1882 return;
1884 else if (code == MEM)
1886 create_insn_allocnos (XEXP (x, 0), NULL, false);
1887 return;
1889 else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
1890 code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1892 create_insn_allocnos (XEXP (x, 0), NULL, true);
1893 create_insn_allocnos (XEXP (x, 0), NULL, false);
1894 return;
1897 fmt = GET_RTX_FORMAT (code);
1898 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1900 if (fmt[i] == 'e')
1901 create_insn_allocnos (XEXP (x, i), x, output_p);
1902 else if (fmt[i] == 'E')
1903 for (j = 0; j < XVECLEN (x, i); j++)
1904 create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
1908 /* Create allocnos corresponding to pseudo-registers living in the
1909 basic block represented by the corresponding loop tree node
1910 BB_NODE. */
1911 static void
1912 create_bb_allocnos (ira_loop_tree_node_t bb_node)
1914 basic_block bb;
1915 rtx_insn *insn;
1916 unsigned int i;
1917 bitmap_iterator bi;
1919 curr_bb = bb = bb_node->bb;
1920 ira_assert (bb != NULL);
1921 FOR_BB_INSNS_REVERSE (bb, insn)
1922 if (NONDEBUG_INSN_P (insn))
1923 create_insn_allocnos (PATTERN (insn), NULL, false);
1924 /* It might be a allocno living through from one subloop to
1925 another. */
1926 EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
1927 if (ira_curr_regno_allocno_map[i] == NULL)
1928 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1931 /* Create allocnos corresponding to pseudo-registers living on edge E
1932 (a loop entry or exit). Also mark the allocnos as living on the
1933 loop border. */
1934 static void
1935 create_loop_allocnos (edge e)
1937 unsigned int i;
1938 bitmap live_in_regs, border_allocnos;
1939 bitmap_iterator bi;
1940 ira_loop_tree_node_t parent;
1942 live_in_regs = df_get_live_in (e->dest);
1943 border_allocnos = ira_curr_loop_tree_node->border_allocnos;
1944 EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
1945 FIRST_PSEUDO_REGISTER, i, bi)
1946 if (bitmap_bit_p (live_in_regs, i))
1948 if (ira_curr_regno_allocno_map[i] == NULL)
1950 /* The order of creations is important for right
1951 ira_regno_allocno_map. */
1952 if ((parent = ira_curr_loop_tree_node->parent) != NULL
1953 && parent->regno_allocno_map[i] == NULL)
1954 ira_create_allocno (i, false, parent);
1955 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1957 bitmap_set_bit (border_allocnos,
1958 ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1962 /* Create allocnos corresponding to pseudo-registers living in loop
1963 represented by the corresponding loop tree node LOOP_NODE. This
1964 function is called by ira_traverse_loop_tree. */
1965 static void
1966 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
1968 if (loop_node->bb != NULL)
1969 create_bb_allocnos (loop_node);
1970 else if (loop_node != ira_loop_tree_root)
1972 int i;
1973 edge_iterator ei;
1974 edge e;
1975 vec<edge> edges;
1977 ira_assert (current_loops != NULL);
1978 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
1979 if (e->src != loop_node->loop->latch)
1980 create_loop_allocnos (e);
1982 edges = get_loop_exit_edges (loop_node->loop);
1983 FOR_EACH_VEC_ELT (edges, i, e)
1984 create_loop_allocnos (e);
1985 edges.release ();
1989 /* Propagate information about allocnos modified inside the loop given
1990 by its LOOP_TREE_NODE to its parent. */
1991 static void
1992 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1994 if (loop_tree_node == ira_loop_tree_root)
1995 return;
1996 ira_assert (loop_tree_node->bb == NULL);
1997 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1998 loop_tree_node->modified_regnos);
2001 /* Propagate new info about allocno A (see comments about accumulated
2002 info in allocno definition) to the corresponding allocno on upper
2003 loop tree level. So allocnos on upper levels accumulate
2004 information about the corresponding allocnos in nested regions.
2005 The new info means allocno info finally calculated in this
2006 file. */
2007 static void
2008 propagate_allocno_info (void)
2010 int i;
2011 ira_allocno_t a, parent_a;
2012 ira_loop_tree_node_t parent;
2013 enum reg_class aclass;
2015 if (flag_ira_region != IRA_REGION_ALL
2016 && flag_ira_region != IRA_REGION_MIXED)
2017 return;
2018 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2019 for (a = ira_regno_allocno_map[i];
2020 a != NULL;
2021 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2022 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
2023 && (parent_a = parent->regno_allocno_map[i]) != NULL
2024 /* There are no caps yet at this point. So use
2025 border_allocnos to find allocnos for the propagation. */
2026 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
2027 ALLOCNO_NUM (a)))
2029 if (! ALLOCNO_BAD_SPILL_P (a))
2030 ALLOCNO_BAD_SPILL_P (parent_a) = false;
2031 ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
2032 ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
2033 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2034 merge_hard_reg_conflicts (a, parent_a, true);
2035 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2036 += ALLOCNO_CALLS_CROSSED_NUM (a);
2037 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2038 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
2039 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
2040 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2041 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2042 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2043 aclass = ALLOCNO_CLASS (a);
2044 ira_assert (aclass == ALLOCNO_CLASS (parent_a));
2045 ira_allocate_and_accumulate_costs
2046 (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
2047 ALLOCNO_HARD_REG_COSTS (a));
2048 ira_allocate_and_accumulate_costs
2049 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
2050 aclass,
2051 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
2052 ALLOCNO_CLASS_COST (parent_a)
2053 += ALLOCNO_CLASS_COST (a);
2054 ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
2058 /* Create allocnos corresponding to pseudo-registers in the current
2059 function. Traverse the loop tree for this. */
2060 static void
2061 create_allocnos (void)
2063 /* We need to process BB first to correctly link allocnos by member
2064 next_regno_allocno. */
2065 ira_traverse_loop_tree (true, ira_loop_tree_root,
2066 create_loop_tree_node_allocnos, NULL);
2067 if (optimize)
2068 ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
2069 propagate_modified_regnos);
2074 /* The page contains function to remove some regions from a separate
2075 register allocation. We remove regions whose separate allocation
2076 will hardly improve the result. As a result we speed up regional
2077 register allocation. */
2079 /* The function changes the object in range list given by R to OBJ. */
2080 static void
2081 change_object_in_range_list (live_range_t r, ira_object_t obj)
2083 for (; r != NULL; r = r->next)
2084 r->object = obj;
2087 /* Move all live ranges associated with allocno FROM to allocno TO. */
2088 static void
2089 move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2091 int i;
2092 int n = ALLOCNO_NUM_OBJECTS (from);
2094 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2096 for (i = 0; i < n; i++)
2098 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2099 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2100 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2102 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2104 fprintf (ira_dump_file,
2105 " Moving ranges of a%dr%d to a%dr%d: ",
2106 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2107 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2108 ira_print_live_range_list (ira_dump_file, lr);
2110 change_object_in_range_list (lr, to_obj);
2111 OBJECT_LIVE_RANGES (to_obj)
2112 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2113 OBJECT_LIVE_RANGES (from_obj) = NULL;
2117 static void
2118 copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2120 int i;
2121 int n = ALLOCNO_NUM_OBJECTS (from);
2123 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2125 for (i = 0; i < n; i++)
2127 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2128 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2129 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2131 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2133 fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
2134 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2135 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2136 ira_print_live_range_list (ira_dump_file, lr);
2138 lr = ira_copy_live_range_list (lr);
2139 change_object_in_range_list (lr, to_obj);
2140 OBJECT_LIVE_RANGES (to_obj)
2141 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2145 /* Return TRUE if NODE represents a loop with low register
2146 pressure. */
2147 static bool
2148 low_pressure_loop_node_p (ira_loop_tree_node_t node)
2150 int i;
2151 enum reg_class pclass;
2153 if (node->bb != NULL)
2154 return false;
2156 for (i = 0; i < ira_pressure_classes_num; i++)
2158 pclass = ira_pressure_classes[i];
2159 if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2160 && ira_class_hard_regs_num[pclass] > 1)
2161 return false;
2163 return true;
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
#endif
2195 /* Sort loops for marking them for removal. We put already marked
2196 loops first, then less frequent loops next, and then outer loops
2197 next. */
2198 static int
2199 loop_compare_func (const void *v1p, const void *v2p)
2201 int diff;
2202 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2203 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2205 ira_assert (l1->parent != NULL && l2->parent != NULL);
2206 if (l1->to_remove_p && ! l2->to_remove_p)
2207 return -1;
2208 if (! l1->to_remove_p && l2->to_remove_p)
2209 return 1;
2210 if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
2211 return diff;
2212 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2213 return diff;
2214 /* Make sorting stable. */
2215 return l1->loop_num - l2->loop_num;
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loop with EH
   exit or enter edges are removed too because the allocation might
   require put pseudo moves on the EH edges (we could still do this
   for pseudos with caller saved hard registers in some cases but it
   is impossible to say here or during top-down allocation pass what
   hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  /* First pass: mark low-pressure-inside-low-pressure loops (and, on
     stack-reg targets, loops with complex edges) and collect all
     non-root loop nodes for sorting.  */
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  /* Second pass: sort (marked loops first, then cheaper loops) and
     additionally mark the cheapest loops until at most
     IRA_MAX_LOOPS_NUM survive.  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
/* Mark all loops but root for removing.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop_num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes; their
   final destruction is deferred until the end of
   remove_unnecessary_regions.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Scratch vec used while re-parenting the children of removed loop
   tree nodes.  */
static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  (NOTE: "uneccesary" in the name is a historic
   misspelling of "unnecessary".)  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  start = children_vec.length ();
  /* Recurse into subloops; surviving nodes (and all BB nodes) end up
     on CHILDREN_VEC above index START.  */
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* NODE itself goes away; leave its surviving children on the
	 vector so the nearest surviving ancestor adopts them.  */
      removed_loop_vec.safe_push (node);
      return;
    }
  /* Re-link everything pushed after START as children (and, for loop
     nodes, subloops) of NODE.  */
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
2352 /* Return TRUE if NODE is inside PARENT. */
2353 static bool
2354 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2356 for (node = node->parent; node != NULL; node = node->parent)
2357 if (node == parent)
2358 return true;
2359 return false;
2362 /* Sort allocnos according to their order in regno allocno list. */
2363 static int
2364 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2366 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2367 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2368 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2369 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2371 if (loop_is_inside_p (n1, n2))
2372 return -1;
2373 else if (loop_is_inside_p (n2, n1))
2374 return 1;
2375 /* If allocnos are equally good, sort by allocno numbers, so that
2376 the results of qsort leave nothing to chance. We put allocnos
2377 with higher number first in the list because it is the original
2378 order for allocnos from loops on the same levels. */
2379 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  Allocated lazily in
   remove_unnecessary_allocnos and sized ira_allocnos_num.  */
static ira_allocno_t *regno_allocnos;
/* Restore allocno order for REGNO in the regno allocno list.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  /* Collect every allocno of REGNO into REGNO_ALLOCNOS...  */
  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  /* ...sort them into loop-tree order...  */
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  /* ...and rebuild the singly linked regno allocno list.  */
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
/* Propagate info from allocno FROM_A to allocno A: accumulate
   conflict sets, reference/frequency/call statistics, and cost
   vectors.  FROM_A is typically an allocno of a removed region being
   merged into its counterpart A in the surviving region.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
		    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));

  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* A good-spill-candidate source makes the destination one too.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Find the nearest surviving ancestor region that has an
		 allocno for REGNO (or the nearest surviving region at
		 all, if none has one).  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	{
	  /* We need to restore the order in regno allocno list.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  /* Pass 1: merge every non-root, non-cap allocno into the root
     region's allocno for the same regno (creating the root entry by
     simply moving the allocno there when it is missing).  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      /* Propagate only once per regno chain: when the immediate
	 parent has no allocno for REGNO.  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  /* Pass 2: consistency checks on the remaining tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  /* Pass 3: keep only the root-region allocnos, fix up their maps and
     conflict sets, and destroy everything else.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
/* Remove loops from consideration.  We remove all loops except for
   root if ALL_P or loops for which a separate allocation will not
   improve the result.  We have to do this after allocno creation and
   their costs and allocno class evaluation because only after that
   the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  children_vec.create (last_basic_block_for_fn (cfun)
		       + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block_for_fn (cfun)
			   + number_of_loops (cfun));
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  /* Destroy the removed nodes only now: their maps were needed by the
     allocno-removal passes above.  */
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}
/* At this point true value of allocno attribute bad_spill_p means
   that there is an insn where allocno occurs and where the allocno
   can not be used as memory.  The function updates the attribute, now
   it can be true only for allocnos which can not be used as memory in
   an insn and in whose live ranges there is other allocno deaths.
   Spilling allocnos with true value will not improve the code because
   it will not make other allocnos colorable and additional reloads
   for the corresponding pseudo will be generated in reload pass for
   each insn it occurs.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  /* Collect, per allocno class, the program points at which some
     object of that class dies (range finish points).  */
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  /* Clear BAD_SPILL_P on allocnos whose ranges contain no interior
     death of another same-class allocno -- spilling them cannot free
     a register for anyone.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  /* R != NULL means a dead point was found inside a range.  */
	  if (r != NULL)
	    {
	      ALLOCNO_BAD_SPILL_P (a) = false;
	      break;
	    }
	}
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  /* Initialize each object's MIN/MAX from its own range list.  Ranges
     are ordered with the latest first, so the head gives MAX and the
     tail gives MIN.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  /* Accumulate ranges upward: into cap chains, or into the parent
     region's allocno for the same regno.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      continue;
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  /* Every object's MIN/MAX must now lie in [0, ira_max_point].  */
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
	  && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
2771 /* Sort allocnos according to their live ranges. Allocnos with
2772 smaller allocno class are put first unless we use priority
2773 coloring. Allocnos with the same class are ordered according
2774 their start (min). Allocnos with the same start are ordered
2775 according their finish (max). */
2776 static int
2777 object_range_compare_func (const void *v1p, const void *v2p)
2779 int diff;
2780 ira_object_t obj1 = *(const ira_object_t *) v1p;
2781 ira_object_t obj2 = *(const ira_object_t *) v2p;
2782 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2783 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2785 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2786 return diff;
2787 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2788 return diff;
2789 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
/* Sort ira_object_id_map and set up conflict id of allocnos.  */
static void
sort_conflict_id_map (void)
{
  int i, num;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  /* Compact all live objects into the front of the map.  */
  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	ira_object_id_map[num++] = obj;
    }
  if (num > 1)
    qsort (ira_object_id_map, num, sizeof (ira_object_t),
	   object_range_compare_func);
  /* The conflict id is the object's position in the sorted map.  */
  for (i = 0; i < num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      gcc_assert (obj != NULL);
      OBJECT_CONFLICT_ID (obj) = i;
    }
  for (i = num; i < ira_objects_num; i++)
    ira_object_id_map[i] = NULL;
}
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  After this, OBJECT_MIN/OBJECT_MAX hold
   conflict-id bounds instead of live-range points.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  /* Forward sweep over objects in conflict-id order: compute the
     minimal conflict id MIN each object can conflict with, saving the
     original OBJECT_MIN in LIVE_RANGE_MIN.  */
  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocno with smaller ids will
	     be also skipped because of the secondary sorting the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  /* Backward sweep: compute the maximal conflict id via LAST_LIVED,
     which maps a program point to the smallest-id object still live
     there.  */
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* In filling, we can go further A range finish to recognize
	 intersection quickly because if the finish of subsequently
	 processed allocno (it has smaller conflict id) range is
	 further A range finish than they are definitely intersected
	 (the reason for this is the allocnos with bigger conflict id
	 have their range starts not smaller than allocnos with
	 smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra
     conflicts in subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
/* Walk all allocnos of non-root regions and create cap allocnos in
   the enclosing region for those that are not live across the region
   border (presumably caps summarize a region-local allocno for its
   parent -- see create_cap_allocno; TODO confirm against ira-int.h).
   Existing cap members are chained upward with further caps.  */
static void
create_caps (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	create_cap_allocno (a);
      else if (ALLOCNO_CAP (a) == NULL)
	{
	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
	    create_cap_allocno (a);
	}
    }
}
/* The page contains code transforming the multi-region internal
   representation (IR) into a one-region IR, which is necessary for
   reload.  This transformation is called IR flattening.  We might
   just rebuild the IR for one region, but we don't do it because it
   takes a lot of time.  */

/* Map: regno -> allocno which will finally represent the regno in
   the one-region IR.  */
static ira_allocno_t *regno_top_level_allocno_map;
2984 /* Find the allocno that corresponds to A at a level one higher up in the
2985 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2986 ira_allocno_t
2987 ira_parent_allocno (ira_allocno_t a)
2989 ira_loop_tree_node_t parent;
2991 if (ALLOCNO_CAP (a) != NULL)
2992 return NULL;
2994 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2995 if (parent == NULL)
2996 return NULL;
2998 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3001 /* Find the allocno that corresponds to A at a level one higher up in the
3002 loop tree. If ALLOCNO_CAP is set for A, return that. */
3003 ira_allocno_t
3004 ira_parent_or_cap_allocno (ira_allocno_t a)
3006 if (ALLOCNO_CAP (a) != NULL)
3007 return ALLOCNO_CAP (a);
3009 return ira_parent_allocno (a);
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Walk up to the nearest ancestor allocno that either has no
	 entry for REGNO, or survives flattening and was the target of
	 a removed memory store.  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      /* Accumulate A's ranges, conflicts and call statistics into the
	 surviving destination allocno.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
			ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
3065 /* Flatten the IR. In other words, this function transforms IR as if
3066 it were built with one region (without loops). We could make it
3067 much simpler by rebuilding IR with one region, but unfortunately it
3068 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
3069 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
3070 IRA_MAX_POINT before emitting insns on the loop borders. */
3071 void
3072 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
3074 int i, j;
3075 bool keep_p;
3076 int hard_regs_num;
3077 bool new_pseudos_p, merged_p, mem_dest_p;
3078 unsigned int n;
3079 enum reg_class aclass;
3080 ira_allocno_t a, parent_a, first, second, node_first, node_second;
3081 ira_copy_t cp;
3082 ira_loop_tree_node_t node;
3083 live_range_t r;
3084 ira_allocno_iterator ai;
3085 ira_copy_iterator ci;
3087 regno_top_level_allocno_map
3088 = (ira_allocno_t *) ira_allocate (max_reg_num ()
3089 * sizeof (ira_allocno_t));
3090 memset (regno_top_level_allocno_map, 0,
3091 max_reg_num () * sizeof (ira_allocno_t));
3092 new_pseudos_p = merged_p = false;
3093 FOR_EACH_ALLOCNO (a, ai)
3095 ira_allocno_object_iterator oi;
3096 ira_object_t obj;
3098 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3099 /* Caps are not in the regno allocno maps and they are never
3100 will be transformed into allocnos existing after IR
3101 flattening. */
3102 continue;
3103 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3104 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
3105 OBJECT_CONFLICT_HARD_REGS (obj));
3106 #ifdef STACK_REGS
3107 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
3108 #endif
3110 /* Fix final allocno attributes. */
3111 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
3113 mem_dest_p = false;
3114 for (a = ira_regno_allocno_map[i];
3115 a != NULL;
3116 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3118 ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
3120 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3121 if (data->somewhere_renamed_p)
3122 new_pseudos_p = true;
3123 parent_a = ira_parent_allocno (a);
3124 if (parent_a == NULL)
3126 ALLOCNO_COPIES (a) = NULL;
3127 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3128 continue;
3130 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
3132 if (data->mem_optimized_dest != NULL)
3133 mem_dest_p = true;
3134 parent_data = ALLOCNO_EMIT_DATA (parent_a);
3135 if (REGNO (data->reg) == REGNO (parent_data->reg))
3137 merge_hard_reg_conflicts (a, parent_a, true);
3138 move_allocno_live_ranges (a, parent_a);
3139 merged_p = true;
3140 parent_data->mem_optimized_dest_p
3141 = (parent_data->mem_optimized_dest_p
3142 || data->mem_optimized_dest_p);
3143 continue;
3145 new_pseudos_p = true;
3146 for (;;)
3148 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
3149 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
3150 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
3151 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3152 -= ALLOCNO_CALLS_CROSSED_NUM (a);
3153 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3154 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3155 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3156 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3157 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
3158 && ALLOCNO_NREFS (parent_a) >= 0
3159 && ALLOCNO_FREQ (parent_a) >= 0);
3160 aclass = ALLOCNO_CLASS (parent_a);
3161 hard_regs_num = ira_class_hard_regs_num[aclass];
3162 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
3163 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
3164 for (j = 0; j < hard_regs_num; j++)
3165 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
3166 -= ALLOCNO_HARD_REG_COSTS (a)[j];
3167 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
3168 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
3169 for (j = 0; j < hard_regs_num; j++)
3170 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
3171 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
3172 ALLOCNO_CLASS_COST (parent_a)
3173 -= ALLOCNO_CLASS_COST (a);
3174 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
3175 parent_a = ira_parent_allocno (parent_a);
3176 if (parent_a == NULL)
3177 break;
3179 ALLOCNO_COPIES (a) = NULL;
3180 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3182 if (mem_dest_p && copy_info_to_removed_store_destinations (i))
3183 merged_p = true;
3185 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
3186 if (merged_p || ira_max_point_before_emit != ira_max_point)
3187 ira_rebuild_start_finish_chains ();
3188 if (new_pseudos_p)
3190 sparseset objects_live;
3192 /* Rebuild conflicts. */
3193 FOR_EACH_ALLOCNO (a, ai)
3195 ira_allocno_object_iterator oi;
3196 ira_object_t obj;
3198 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3199 || ALLOCNO_CAP_MEMBER (a) != NULL)
3200 continue;
3201 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3203 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3204 ira_assert (r->object == obj);
3205 clear_conflicts (obj);
3208 objects_live = sparseset_alloc (ira_objects_num);
3209 for (i = 0; i < ira_max_point; i++)
3211 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
3213 ira_object_t obj = r->object;
3215 a = OBJECT_ALLOCNO (obj);
3216 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3217 || ALLOCNO_CAP_MEMBER (a) != NULL)
3218 continue;
3220 aclass = ALLOCNO_CLASS (a);
3221 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
3223 ira_object_t live_obj = ira_object_id_map[n];
3224 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
3225 enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
3227 if (ira_reg_classes_intersect_p[aclass][live_aclass]
3228 /* Don't set up conflict for the allocno with itself. */
3229 && live_a != a)
3230 ira_add_conflict (obj, live_obj);
3232 sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
3235 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
3236 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
3238 sparseset_free (objects_live);
3239 compress_conflict_vecs ();
3241 /* Mark some copies for removing and change allocnos in the rest
3242 copies. */
3243 FOR_EACH_COPY (cp, ci)
3245 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
3246 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
3248 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3249 fprintf
3250 (ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
3251 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
3252 ALLOCNO_NUM (cp->first),
3253 REGNO (allocno_emit_reg (cp->first)),
3254 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
3255 ALLOCNO_NUM (cp->second),
3256 REGNO (allocno_emit_reg (cp->second)));
3257 cp->loop_tree_node = NULL;
3258 continue;
3260 first
3261 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
3262 second
3263 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
3264 node = cp->loop_tree_node;
3265 if (node == NULL)
3266 keep_p = true; /* It copy generated in ira-emit.c. */
3267 else
3269 /* Check that the copy was not propagated from level on
3270 which we will have different pseudos. */
3271 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
3272 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
3273 keep_p = ((REGNO (allocno_emit_reg (first))
3274 == REGNO (allocno_emit_reg (node_first)))
3275 && (REGNO (allocno_emit_reg (second))
3276 == REGNO (allocno_emit_reg (node_second))));
3278 if (keep_p)
3280 cp->loop_tree_node = ira_loop_tree_root;
3281 cp->first = first;
3282 cp->second = second;
3284 else
3286 cp->loop_tree_node = NULL;
3287 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3288 fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
3289 cp->num, ALLOCNO_NUM (cp->first),
3290 REGNO (allocno_emit_reg (cp->first)),
3291 ALLOCNO_NUM (cp->second),
3292 REGNO (allocno_emit_reg (cp->second)));
3295 /* Remove unnecessary allocnos on lower levels of the loop tree. */
3296 FOR_EACH_ALLOCNO (a, ai)
3298 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3299 || ALLOCNO_CAP_MEMBER (a) != NULL)
3301 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3302 fprintf (ira_dump_file, " Remove a%dr%d\n",
3303 ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
3304 ira_remove_allocno_prefs (a);
3305 finish_allocno (a);
3306 continue;
3308 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
3309 ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
3310 ALLOCNO_CAP (a) = NULL;
3311 /* Restore updated costs for assignments from reload. */
3312 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3313 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
3314 if (! ALLOCNO_ASSIGNED_P (a))
3315 ira_free_allocno_updated_costs (a);
3316 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3317 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3319 /* Remove unnecessary copies. */
3320 FOR_EACH_COPY (cp, ci)
3322 if (cp->loop_tree_node == NULL)
3324 ira_copies[cp->num] = NULL;
3325 finish_copy (cp);
3326 continue;
3328 ira_assert
3329 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
3330 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
3331 add_allocno_copy_to_list (cp);
3332 swap_allocno_copy_ends_if_necessary (cp);
3334 rebuild_regno_allocno_maps ();
3335 if (ira_max_point != ira_max_point_before_emit)
3336 ira_compress_allocno_live_ranges ();
3337 ira_free (regno_top_level_allocno_map);
#ifdef ENABLE_IRA_CHECKING
/* Sanity-check allocno creation.  Every allocno must be recorded in
   the ALL_ALLOCNOS bitmap of its loop tree node.  An allocno living
   below the root must either be a cap member, have a cap, or have a
   corresponding allocno in the parent node while being registered as
   a border allocno of its own node.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_loop_tree_node_t node = ALLOCNO_LOOP_TREE_NODE (a);

      ira_assert (bitmap_bit_p (node->all_allocnos, ALLOCNO_NUM (a)));
      if (node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (node->parent->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
3370 /* Identify allocnos which prefer a register class with a single hard register.
3371 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3372 less likely to use the preferred singleton register. */
3373 static void
3374 update_conflict_hard_reg_costs (void)
3376 ira_allocno_t a;
3377 ira_allocno_iterator ai;
3378 int i, index, min;
3380 FOR_EACH_ALLOCNO (a, ai)
3382 reg_class_t aclass = ALLOCNO_CLASS (a);
3383 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3384 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3385 if (singleton < 0)
3386 continue;
3387 index = ira_class_hard_reg_index[(int) aclass][singleton];
3388 if (index < 0)
3389 continue;
3390 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3391 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3392 continue;
3393 min = INT_MAX;
3394 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3395 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3396 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3397 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3398 if (min == INT_MAX)
3399 continue;
3400 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3401 aclass, 0);
3402 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3403 -= min - ALLOCNO_CLASS_COST (a);
3407 /* Create a internal representation (IR) for IRA (allocnos, copies,
3408 loop tree nodes). The function returns TRUE if we generate loop
3409 structure (besides nodes representing all function and the basic
3410 blocks) for regional allocation. A true return means that we
3411 really need to flatten IR before the reload. */
3412 bool
3413 ira_build (void)
3415 bool loops_p;
3417 df_analyze ();
3418 initiate_cost_vectors ();
3419 initiate_allocnos ();
3420 initiate_prefs ();
3421 initiate_copies ();
3422 create_loop_tree_nodes ();
3423 form_loop_tree ();
3424 create_allocnos ();
3425 ira_costs ();
3426 create_allocno_objects ();
3427 ira_create_allocno_live_ranges ();
3428 remove_unnecessary_regions (false);
3429 ira_compress_allocno_live_ranges ();
3430 update_bad_spill_attribute ();
3431 loops_p = more_one_region_p ();
3432 if (loops_p)
3434 propagate_allocno_info ();
3435 create_caps ();
3437 ira_tune_allocno_costs ();
3438 #ifdef ENABLE_IRA_CHECKING
3439 check_allocno_creation ();
3440 #endif
3441 setup_min_max_allocno_live_range_point ();
3442 sort_conflict_id_map ();
3443 setup_min_max_conflict_allocno_ids ();
3444 ira_build_conflicts ();
3445 update_conflict_hard_reg_costs ();
3446 if (! ira_conflicts_p)
3448 ira_allocno_t a;
3449 ira_allocno_iterator ai;
3451 /* Remove all regions but root one. */
3452 if (loops_p)
3454 remove_unnecessary_regions (true);
3455 loops_p = false;
3457 /* We don't save hard registers around calls for fast allocation
3458 -- add caller clobbered registers as conflicting ones to
3459 allocno crossing calls. */
3460 FOR_EACH_ALLOCNO (a, ai)
3461 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
3462 ior_hard_reg_conflicts (a, &call_used_reg_set);
3464 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3465 print_copies (ira_dump_file);
3466 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3467 print_prefs (ira_dump_file);
3468 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3470 int n, nr, nr_big;
3471 ira_allocno_t a;
3472 live_range_t r;
3473 ira_allocno_iterator ai;
3475 n = 0;
3476 nr = 0;
3477 nr_big = 0;
3478 FOR_EACH_ALLOCNO (a, ai)
3480 int j, nobj = ALLOCNO_NUM_OBJECTS (a);
3482 if (nobj > 1)
3483 nr_big++;
3484 for (j = 0; j < nobj; j++)
3486 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3487 n += OBJECT_NUM_CONFLICTS (obj);
3488 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3489 nr++;
3492 fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
3493 current_loops == NULL ? 1 : number_of_loops (cfun),
3494 n_basic_blocks_for_fn (cfun), ira_max_point);
3495 fprintf (ira_dump_file,
3496 " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3497 ira_allocnos_num, nr_big, ira_copies_num, n, nr);
3499 return loops_p;
/* Release all data created by function ira_build.  The teardown
   order mirrors the setup: loop tree, preferences, copies, allocnos,
   cost vectors, and finally the live range data.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}