2015-06-11 Paul Thomas <pault@gcc.gnu.org>
[official-gcc.git] / gcc / ira-build.c
blobf0d0ed3312cec449925907d0ca98e2da837f6266
1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "target.h"
28 #include "regs.h"
29 #include "flags.h"
30 #include "hard-reg-set.h"
31 #include "predict.h"
32 #include "input.h"
33 #include "function.h"
34 #include "dominance.h"
35 #include "cfg.h"
36 #include "basic-block.h"
37 #include "insn-config.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "params.h"
41 #include "df.h"
42 #include "reload.h"
43 #include "sparseset.h"
44 #include "ira-int.h"
45 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
47 static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
48 ira_loop_tree_node_t);
50 /* The root of the loop tree corresponding to the all function. */
51 ira_loop_tree_node_t ira_loop_tree_root;
53 /* Height of the loop tree. */
54 int ira_loop_tree_height;
56 /* All nodes representing basic blocks are referred through the
57 following array. We can not use basic block member `aux' for this
58 because it is used for insertion of insns on edges. */
59 ira_loop_tree_node_t ira_bb_nodes;
61 /* All nodes representing loops are referred through the following
62 array. */
63 ira_loop_tree_node_t ira_loop_nodes;
65 /* And size of the ira_loop_nodes array. */
66 unsigned int ira_loop_nodes_count;
68 /* Map regno -> allocnos with given regno (see comments for
69 allocno member `next_regno_allocno'). */
70 ira_allocno_t *ira_regno_allocno_map;
72 /* Array of references to all allocnos. The order number of the
73 allocno corresponds to the index in the array. Removed allocnos
74 have NULL element value. */
75 ira_allocno_t *ira_allocnos;
77 /* Sizes of the previous array. */
78 int ira_allocnos_num;
80 /* Count of conflict record structures we've created, used when creating
81 a new conflict id. */
82 int ira_objects_num;
84 /* Map a conflict id to its conflict record. */
85 ira_object_t *ira_object_id_map;
87 /* Array of references to all allocno preferences. The order number
88 of the preference corresponds to the index in the array. */
89 ira_pref_t *ira_prefs;
91 /* Size of the previous array. */
92 int ira_prefs_num;
94 /* Array of references to all copies. The order number of the copy
95 corresponds to the index in the array. Removed copies have NULL
96 element value. */
97 ira_copy_t *ira_copies;
99 /* Size of the previous array. */
100 int ira_copies_num;
104 /* LAST_BASIC_BLOCK before generating additional insns because of live
105 range splitting. Emitting insns on a critical edge creates a new
106 basic block. */
107 static int last_basic_block_before_change;
109 /* Initialize some members in loop tree node NODE. Use LOOP_NUM for
110 the member loop_num. */
111 static void
112 init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
114 int max_regno = max_reg_num ();
116 node->regno_allocno_map
117 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
118 memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
119 memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
120 node->all_allocnos = ira_allocate_bitmap ();
121 node->modified_regnos = ira_allocate_bitmap ();
122 node->border_allocnos = ira_allocate_bitmap ();
123 node->local_copies = ira_allocate_bitmap ();
124 node->loop_num = loop_num;
125 node->children = NULL;
126 node->subloops = NULL;
130 /* The following function allocates the loop tree nodes. If
131 CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
132 the root which corresponds the all function) will be not allocated
133 but nodes will still be allocated for basic blocks. */
134 static void
135 create_loop_tree_nodes (void)
137 unsigned int i, j;
138 bool skip_p;
139 edge_iterator ei;
140 edge e;
141 vec<edge> edges;
142 loop_p loop;
144 ira_bb_nodes
145 = ((struct ira_loop_tree_node *)
146 ira_allocate (sizeof (struct ira_loop_tree_node)
147 * last_basic_block_for_fn (cfun)));
148 last_basic_block_before_change = last_basic_block_for_fn (cfun);
149 for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
151 ira_bb_nodes[i].regno_allocno_map = NULL;
152 memset (ira_bb_nodes[i].reg_pressure, 0,
153 sizeof (ira_bb_nodes[i].reg_pressure));
154 ira_bb_nodes[i].all_allocnos = NULL;
155 ira_bb_nodes[i].modified_regnos = NULL;
156 ira_bb_nodes[i].border_allocnos = NULL;
157 ira_bb_nodes[i].local_copies = NULL;
159 if (current_loops == NULL)
161 ira_loop_nodes_count = 1;
162 ira_loop_nodes = ((struct ira_loop_tree_node *)
163 ira_allocate (sizeof (struct ira_loop_tree_node)));
164 init_loop_tree_node (ira_loop_nodes, 0);
165 return;
167 ira_loop_nodes_count = number_of_loops (cfun);
168 ira_loop_nodes = ((struct ira_loop_tree_node *)
169 ira_allocate (sizeof (struct ira_loop_tree_node)
170 * ira_loop_nodes_count));
171 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
173 if (loop_outer (loop) != NULL)
175 ira_loop_nodes[i].regno_allocno_map = NULL;
176 skip_p = false;
177 FOR_EACH_EDGE (e, ei, loop->header->preds)
178 if (e->src != loop->latch
179 && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
181 skip_p = true;
182 break;
184 if (skip_p)
185 continue;
186 edges = get_loop_exit_edges (loop);
187 FOR_EACH_VEC_ELT (edges, j, e)
188 if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
190 skip_p = true;
191 break;
193 edges.release ();
194 if (skip_p)
195 continue;
197 init_loop_tree_node (&ira_loop_nodes[i], loop->num);
201 /* The function returns TRUE if there are more one allocation
202 region. */
203 static bool
204 more_one_region_p (void)
206 unsigned int i;
207 loop_p loop;
209 if (current_loops != NULL)
210 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
211 if (ira_loop_nodes[i].regno_allocno_map != NULL
212 && ira_loop_tree_root != &ira_loop_nodes[i])
213 return true;
214 return false;
217 /* Free the loop tree node of a loop. */
218 static void
219 finish_loop_tree_node (ira_loop_tree_node_t loop)
221 if (loop->regno_allocno_map != NULL)
223 ira_assert (loop->bb == NULL);
224 ira_free_bitmap (loop->local_copies);
225 ira_free_bitmap (loop->border_allocnos);
226 ira_free_bitmap (loop->modified_regnos);
227 ira_free_bitmap (loop->all_allocnos);
228 ira_free (loop->regno_allocno_map);
229 loop->regno_allocno_map = NULL;
233 /* Free the loop tree nodes. */
234 static void
235 finish_loop_tree_nodes (void)
237 unsigned int i;
239 for (i = 0; i < ira_loop_nodes_count; i++)
240 finish_loop_tree_node (&ira_loop_nodes[i]);
241 ira_free (ira_loop_nodes);
242 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
244 if (ira_bb_nodes[i].local_copies != NULL)
245 ira_free_bitmap (ira_bb_nodes[i].local_copies);
246 if (ira_bb_nodes[i].border_allocnos != NULL)
247 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
248 if (ira_bb_nodes[i].modified_regnos != NULL)
249 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
250 if (ira_bb_nodes[i].all_allocnos != NULL)
251 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
252 if (ira_bb_nodes[i].regno_allocno_map != NULL)
253 ira_free (ira_bb_nodes[i].regno_allocno_map);
255 ira_free (ira_bb_nodes);
260 /* The following recursive function adds LOOP to the loop tree
261 hierarchy. LOOP is added only once. If LOOP is NULL we adding
262 loop designating the whole function when CFG loops are not
263 built. */
264 static void
265 add_loop_to_tree (struct loop *loop)
267 int loop_num;
268 struct loop *parent;
269 ira_loop_tree_node_t loop_node, parent_node;
271 /* We can not use loop node access macros here because of potential
272 checking and because the nodes are not initialized enough
273 yet. */
274 if (loop != NULL && loop_outer (loop) != NULL)
275 add_loop_to_tree (loop_outer (loop));
276 loop_num = loop != NULL ? loop->num : 0;
277 if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
278 && ira_loop_nodes[loop_num].children == NULL)
280 /* We have not added loop node to the tree yet. */
281 loop_node = &ira_loop_nodes[loop_num];
282 loop_node->loop = loop;
283 loop_node->bb = NULL;
284 if (loop == NULL)
285 parent = NULL;
286 else
288 for (parent = loop_outer (loop);
289 parent != NULL;
290 parent = loop_outer (parent))
291 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
292 break;
294 if (parent == NULL)
296 loop_node->next = NULL;
297 loop_node->subloop_next = NULL;
298 loop_node->parent = NULL;
300 else
302 parent_node = &ira_loop_nodes[parent->num];
303 loop_node->next = parent_node->children;
304 parent_node->children = loop_node;
305 loop_node->subloop_next = parent_node->subloops;
306 parent_node->subloops = loop_node;
307 loop_node->parent = parent_node;
312 /* The following recursive function sets up levels of nodes of the
313 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
314 The function returns maximal value of level in the tree + 1. */
315 static int
316 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
318 int height, max_height;
319 ira_loop_tree_node_t subloop_node;
321 ira_assert (loop_node->bb == NULL);
322 loop_node->level = level;
323 max_height = level + 1;
324 for (subloop_node = loop_node->subloops;
325 subloop_node != NULL;
326 subloop_node = subloop_node->subloop_next)
328 ira_assert (subloop_node->bb == NULL);
329 height = setup_loop_tree_level (subloop_node, level + 1);
330 if (height > max_height)
331 max_height = height;
333 return max_height;
336 /* Create the loop tree. The algorithm is designed to provide correct
337 order of loops (they are ordered by their last loop BB) and basic
338 blocks in the chain formed by member next. */
339 static void
340 form_loop_tree (void)
342 basic_block bb;
343 struct loop *parent;
344 ira_loop_tree_node_t bb_node, loop_node;
346 /* We can not use loop/bb node access macros because of potential
347 checking and because the nodes are not initialized enough
348 yet. */
349 FOR_EACH_BB_FN (bb, cfun)
351 bb_node = &ira_bb_nodes[bb->index];
352 bb_node->bb = bb;
353 bb_node->loop = NULL;
354 bb_node->subloops = NULL;
355 bb_node->children = NULL;
356 bb_node->subloop_next = NULL;
357 bb_node->next = NULL;
358 if (current_loops == NULL)
359 parent = NULL;
360 else
362 for (parent = bb->loop_father;
363 parent != NULL;
364 parent = loop_outer (parent))
365 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
366 break;
368 add_loop_to_tree (parent);
369 loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
370 bb_node->next = loop_node->children;
371 bb_node->parent = loop_node;
372 loop_node->children = bb_node;
374 ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
375 ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
376 ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
381 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
382 tree nodes. */
383 static void
384 rebuild_regno_allocno_maps (void)
386 unsigned int l;
387 int max_regno, regno;
388 ira_allocno_t a;
389 ira_loop_tree_node_t loop_tree_node;
390 loop_p loop;
391 ira_allocno_iterator ai;
393 ira_assert (current_loops != NULL);
394 max_regno = max_reg_num ();
395 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
396 if (ira_loop_nodes[l].regno_allocno_map != NULL)
398 ira_free (ira_loop_nodes[l].regno_allocno_map);
399 ira_loop_nodes[l].regno_allocno_map
400 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
401 * max_regno);
402 memset (ira_loop_nodes[l].regno_allocno_map, 0,
403 sizeof (ira_allocno_t) * max_regno);
405 ira_free (ira_regno_allocno_map);
406 ira_regno_allocno_map
407 = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
408 memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
409 FOR_EACH_ALLOCNO (a, ai)
411 if (ALLOCNO_CAP_MEMBER (a) != NULL)
412 /* Caps are not in the regno allocno maps. */
413 continue;
414 regno = ALLOCNO_REGNO (a);
415 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
416 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
417 ira_regno_allocno_map[regno] = a;
418 if (loop_tree_node->regno_allocno_map[regno] == NULL)
419 /* Remember that we can create temporary allocnos to break
420 cycles in register shuffle. */
421 loop_tree_node->regno_allocno_map[regno] = a;
426 /* Pools for allocnos, allocno live ranges and objects. */
427 static pool_allocator<live_range> live_range_pool ("live ranges", 100);
428 static pool_allocator<ira_allocno> allocno_pool ("allocnos", 100);
429 static pool_allocator<ira_object> object_pool ("objects", 100);
431 /* Vec containing references to all created allocnos. It is a
432 container of array allocnos. */
433 static vec<ira_allocno_t> allocno_vec;
435 /* Vec containing references to all created ira_objects. It is a
436 container of ira_object_id_map. */
437 static vec<ira_object_t> ira_object_id_map_vec;
439 /* Initialize data concerning allocnos. */
440 static void
441 initiate_allocnos (void)
443 allocno_vec.create (max_reg_num () * 2);
444 ira_allocnos = NULL;
445 ira_allocnos_num = 0;
446 ira_objects_num = 0;
447 ira_object_id_map_vec.create (max_reg_num () * 2);
448 ira_object_id_map = NULL;
449 ira_regno_allocno_map
450 = (ira_allocno_t *) ira_allocate (max_reg_num ()
451 * sizeof (ira_allocno_t));
452 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
455 /* Create and return an object corresponding to a new allocno A. */
456 static ira_object_t
457 ira_create_object (ira_allocno_t a, int subword)
459 enum reg_class aclass = ALLOCNO_CLASS (a);
460 ira_object_t obj = object_pool.allocate ();
462 OBJECT_ALLOCNO (obj) = a;
463 OBJECT_SUBWORD (obj) = subword;
464 OBJECT_CONFLICT_ID (obj) = ira_objects_num;
465 OBJECT_CONFLICT_VEC_P (obj) = false;
466 OBJECT_CONFLICT_ARRAY (obj) = NULL;
467 OBJECT_NUM_CONFLICTS (obj) = 0;
468 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
469 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
470 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
471 reg_class_contents[aclass]);
472 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
473 reg_class_contents[aclass]);
474 OBJECT_MIN (obj) = INT_MAX;
475 OBJECT_MAX (obj) = -1;
476 OBJECT_LIVE_RANGES (obj) = NULL;
478 ira_object_id_map_vec.safe_push (obj);
479 ira_object_id_map
480 = ira_object_id_map_vec.address ();
481 ira_objects_num = ira_object_id_map_vec.length ();
483 return obj;
486 /* Create and return the allocno corresponding to REGNO in
487 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
488 same regno if CAP_P is FALSE. */
489 ira_allocno_t
490 ira_create_allocno (int regno, bool cap_p,
491 ira_loop_tree_node_t loop_tree_node)
493 ira_allocno_t a;
495 a = allocno_pool.allocate ();
496 ALLOCNO_REGNO (a) = regno;
497 ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
498 if (! cap_p)
500 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
501 ira_regno_allocno_map[regno] = a;
502 if (loop_tree_node->regno_allocno_map[regno] == NULL)
503 /* Remember that we can create temporary allocnos to break
504 cycles in register shuffle on region borders (see
505 ira-emit.c). */
506 loop_tree_node->regno_allocno_map[regno] = a;
508 ALLOCNO_CAP (a) = NULL;
509 ALLOCNO_CAP_MEMBER (a) = NULL;
510 ALLOCNO_NUM (a) = ira_allocnos_num;
511 bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
512 ALLOCNO_NREFS (a) = 0;
513 ALLOCNO_FREQ (a) = 0;
514 ALLOCNO_HARD_REGNO (a) = -1;
515 ALLOCNO_CALL_FREQ (a) = 0;
516 ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
517 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
518 CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
519 #ifdef STACK_REGS
520 ALLOCNO_NO_STACK_REG_P (a) = false;
521 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
522 #endif
523 ALLOCNO_DONT_REASSIGN_P (a) = false;
524 ALLOCNO_BAD_SPILL_P (a) = false;
525 ALLOCNO_ASSIGNED_P (a) = false;
526 ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
527 ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
528 ALLOCNO_PREFS (a) = NULL;
529 ALLOCNO_COPIES (a) = NULL;
530 ALLOCNO_HARD_REG_COSTS (a) = NULL;
531 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
532 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
533 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
534 ALLOCNO_CLASS (a) = NO_REGS;
535 ALLOCNO_UPDATED_CLASS_COST (a) = 0;
536 ALLOCNO_CLASS_COST (a) = 0;
537 ALLOCNO_MEMORY_COST (a) = 0;
538 ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
539 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
540 ALLOCNO_NUM_OBJECTS (a) = 0;
542 ALLOCNO_ADD_DATA (a) = NULL;
543 allocno_vec.safe_push (a);
544 ira_allocnos = allocno_vec.address ();
545 ira_allocnos_num = allocno_vec.length ();
547 return a;
550 /* Set up register class for A and update its conflict hard
551 registers. */
552 void
553 ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
555 ira_allocno_object_iterator oi;
556 ira_object_t obj;
558 ALLOCNO_CLASS (a) = aclass;
559 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
561 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
562 reg_class_contents[aclass]);
563 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
564 reg_class_contents[aclass]);
568 /* Determine the number of objects we should associate with allocno A
569 and allocate them. */
570 void
571 ira_create_allocno_objects (ira_allocno_t a)
573 machine_mode mode = ALLOCNO_MODE (a);
574 enum reg_class aclass = ALLOCNO_CLASS (a);
575 int n = ira_reg_class_max_nregs[aclass][mode];
576 int i;
578 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
579 n = 1;
581 ALLOCNO_NUM_OBJECTS (a) = n;
582 for (i = 0; i < n; i++)
583 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
586 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
587 ALLOCNO_OBJECT structures. This must be called after the allocno
588 classes are known. */
589 static void
590 create_allocno_objects (void)
592 ira_allocno_t a;
593 ira_allocno_iterator ai;
595 FOR_EACH_ALLOCNO (a, ai)
596 ira_create_allocno_objects (a);
599 /* Merge hard register conflict information for all objects associated with
600 allocno TO into the corresponding objects associated with FROM.
601 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
602 static void
603 merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
604 bool total_only)
606 int i;
607 gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
608 for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
610 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
611 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
613 if (!total_only)
614 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
615 OBJECT_CONFLICT_HARD_REGS (from_obj));
616 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
617 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
619 #ifdef STACK_REGS
620 if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
621 ALLOCNO_NO_STACK_REG_P (to) = true;
622 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
623 ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
624 #endif
627 /* Update hard register conflict information for all objects associated with
628 A to include the regs in SET. */
629 void
630 ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
632 ira_allocno_object_iterator i;
633 ira_object_t obj;
635 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
637 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
638 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
642 /* Return TRUE if a conflict vector with NUM elements is more
643 profitable than a conflict bit vector for OBJ. */
644 bool
645 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
647 int nw;
648 int max = OBJECT_MAX (obj);
649 int min = OBJECT_MIN (obj);
651 if (max < min)
652 /* We prefer a bit vector in such case because it does not result
653 in allocation. */
654 return false;
656 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
657 return (2 * sizeof (ira_object_t) * (num + 1)
658 < 3 * nw * sizeof (IRA_INT_TYPE));
661 /* Allocates and initialize the conflict vector of OBJ for NUM
662 conflicting objects. */
663 void
664 ira_allocate_conflict_vec (ira_object_t obj, int num)
666 int size;
667 ira_object_t *vec;
669 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
670 num++; /* for NULL end marker */
671 size = sizeof (ira_object_t) * num;
672 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
673 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
674 vec[0] = NULL;
675 OBJECT_NUM_CONFLICTS (obj) = 0;
676 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
677 OBJECT_CONFLICT_VEC_P (obj) = true;
680 /* Allocate and initialize the conflict bit vector of OBJ. */
681 static void
682 allocate_conflict_bit_vec (ira_object_t obj)
684 unsigned int size;
686 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
687 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
688 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
689 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
690 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
691 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
692 OBJECT_CONFLICT_VEC_P (obj) = false;
695 /* Allocate and initialize the conflict vector or conflict bit vector
696 of OBJ for NUM conflicting allocnos whatever is more profitable. */
697 void
698 ira_allocate_object_conflicts (ira_object_t obj, int num)
700 if (ira_conflict_vector_profitable_p (obj, num))
701 ira_allocate_conflict_vec (obj, num);
702 else
703 allocate_conflict_bit_vec (obj);
706 /* Add OBJ2 to the conflicts of OBJ1. */
707 static void
708 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
710 int num;
711 unsigned int size;
713 if (OBJECT_CONFLICT_VEC_P (obj1))
715 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
716 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
717 num = curr_num + 2;
718 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
720 ira_object_t *newvec;
721 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
722 newvec = (ira_object_t *) ira_allocate (size);
723 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
724 ira_free (vec);
725 vec = newvec;
726 OBJECT_CONFLICT_ARRAY (obj1) = vec;
727 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
729 vec[num - 2] = obj2;
730 vec[num - 1] = NULL;
731 OBJECT_NUM_CONFLICTS (obj1)++;
733 else
735 int nw, added_head_nw, id;
736 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
738 id = OBJECT_CONFLICT_ID (obj2);
739 if (OBJECT_MIN (obj1) > id)
741 /* Expand head of the bit vector. */
742 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
743 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
744 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
745 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
747 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
748 vec, nw * sizeof (IRA_INT_TYPE));
749 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
751 else
753 size
754 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
755 vec = (IRA_INT_TYPE *) ira_allocate (size);
756 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
757 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
758 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
759 memset ((char *) vec
760 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
761 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
762 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
763 OBJECT_CONFLICT_ARRAY (obj1) = vec;
764 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
766 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
768 else if (OBJECT_MAX (obj1) < id)
770 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
771 size = nw * sizeof (IRA_INT_TYPE);
772 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
774 /* Expand tail of the bit vector. */
775 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
776 vec = (IRA_INT_TYPE *) ira_allocate (size);
777 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
778 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
779 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
780 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
781 OBJECT_CONFLICT_ARRAY (obj1) = vec;
782 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
784 OBJECT_MAX (obj1) = id;
786 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
790 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
791 static void
792 ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
794 add_to_conflicts (obj1, obj2);
795 add_to_conflicts (obj2, obj1);
798 /* Clear all conflicts of OBJ. */
799 static void
800 clear_conflicts (ira_object_t obj)
802 if (OBJECT_CONFLICT_VEC_P (obj))
804 OBJECT_NUM_CONFLICTS (obj) = 0;
805 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
807 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
809 int nw;
811 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
812 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
816 /* The array used to find duplications in conflict vectors of
817 allocnos. */
818 static int *conflict_check;
820 /* The value used to mark allocation presence in conflict vector of
821 the current allocno. */
822 static int curr_conflict_check_tick;
824 /* Remove duplications in conflict vector of OBJ. */
825 static void
826 compress_conflict_vec (ira_object_t obj)
828 ira_object_t *vec, conflict_obj;
829 int i, j;
831 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
832 vec = OBJECT_CONFLICT_VEC (obj);
833 curr_conflict_check_tick++;
834 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
836 int id = OBJECT_CONFLICT_ID (conflict_obj);
837 if (conflict_check[id] != curr_conflict_check_tick)
839 conflict_check[id] = curr_conflict_check_tick;
840 vec[j++] = conflict_obj;
843 OBJECT_NUM_CONFLICTS (obj) = j;
844 vec[j] = NULL;
847 /* Remove duplications in conflict vectors of all allocnos. */
848 static void
849 compress_conflict_vecs (void)
851 ira_object_t obj;
852 ira_object_iterator oi;
854 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
855 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
856 curr_conflict_check_tick = 0;
857 FOR_EACH_OBJECT (obj, oi)
859 if (OBJECT_CONFLICT_VEC_P (obj))
860 compress_conflict_vec (obj);
862 ira_free (conflict_check);
865 /* This recursive function outputs allocno A and if it is a cap the
866 function outputs its members. */
867 void
868 ira_print_expanded_allocno (ira_allocno_t a)
870 basic_block bb;
872 fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
873 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
874 fprintf (ira_dump_file, ",b%d", bb->index);
875 else
876 fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
877 if (ALLOCNO_CAP_MEMBER (a) != NULL)
879 fprintf (ira_dump_file, ":");
880 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
882 fprintf (ira_dump_file, ")");
885 /* Create and return the cap representing allocno A in the
886 parent loop. */
887 static ira_allocno_t
888 create_cap_allocno (ira_allocno_t a)
890 ira_allocno_t cap;
891 ira_loop_tree_node_t parent;
892 enum reg_class aclass;
894 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
895 cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
896 ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
897 ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
898 aclass = ALLOCNO_CLASS (a);
899 ira_set_allocno_class (cap, aclass);
900 ira_create_allocno_objects (cap);
901 ALLOCNO_CAP_MEMBER (cap) = a;
902 ALLOCNO_CAP (a) = cap;
903 ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
904 ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
905 ira_allocate_and_copy_costs
906 (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
907 ira_allocate_and_copy_costs
908 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
909 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
910 ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
911 ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
912 ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
913 ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
915 merge_hard_reg_conflicts (a, cap, false);
917 ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
918 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
919 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
920 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
921 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
923 fprintf (ira_dump_file, " Creating cap ");
924 ira_print_expanded_allocno (cap);
925 fprintf (ira_dump_file, "\n");
927 return cap;
930 /* Create and return a live range for OBJECT with given attributes. */
931 live_range_t
932 ira_create_live_range (ira_object_t obj, int start, int finish,
933 live_range_t next)
935 live_range_t p;
937 p = live_range_pool.allocate ();
938 p->object = obj;
939 p->start = start;
940 p->finish = finish;
941 p->next = next;
942 return p;
945 /* Create a new live range for OBJECT and queue it at the head of its
946 live range list. */
947 void
948 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
950 live_range_t p;
951 p = ira_create_live_range (object, start, finish,
952 OBJECT_LIVE_RANGES (object));
953 OBJECT_LIVE_RANGES (object) = p;
956 /* Copy allocno live range R and return the result. */
957 static live_range_t
958 copy_live_range (live_range_t r)
960 live_range_t p;
962 p = live_range_pool.allocate ();
963 *p = *r;
964 return p;
967 /* Copy allocno live range list given by its head R and return the
968 result. */
969 live_range_t
970 ira_copy_live_range_list (live_range_t r)
972 live_range_t p, first, last;
974 if (r == NULL)
975 return NULL;
976 for (first = last = NULL; r != NULL; r = r->next)
978 p = copy_live_range (r);
979 if (first == NULL)
980 first = p;
981 else
982 last->next = p;
983 last = p;
985 return first;
988 /* Merge ranges R1 and R2 and returns the result. The function
989 maintains the order of ranges and tries to minimize number of the
990 result ranges. */
991 live_range_t
992 ira_merge_live_ranges (live_range_t r1, live_range_t r2)
994 live_range_t first, last;
996 if (r1 == NULL)
997 return r2;
998 if (r2 == NULL)
999 return r1;
1000 for (first = last = NULL; r1 != NULL && r2 != NULL;)
1002 if (r1->start < r2->start)
1003 std::swap (r1, r2);
1004 if (r1->start <= r2->finish + 1)
1006 /* Intersected ranges: merge r1 and r2 into r1. */
1007 r1->start = r2->start;
1008 if (r1->finish < r2->finish)
1009 r1->finish = r2->finish;
1010 live_range_t temp = r2;
1011 r2 = r2->next;
1012 ira_finish_live_range (temp);
1013 if (r2 == NULL)
1015 /* To try to merge with subsequent ranges in r1. */
1016 r2 = r1->next;
1017 r1->next = NULL;
1020 else
1022 /* Add r1 to the result. */
1023 if (first == NULL)
1024 first = last = r1;
1025 else
1027 last->next = r1;
1028 last = r1;
1030 r1 = r1->next;
1031 if (r1 == NULL)
1033 /* To try to merge with subsequent ranges in r2. */
1034 r1 = r2->next;
1035 r2->next = NULL;
1039 if (r1 != NULL)
1041 if (first == NULL)
1042 first = r1;
1043 else
1044 last->next = r1;
1045 ira_assert (r1->next == NULL);
1047 else if (r2 != NULL)
1049 if (first == NULL)
1050 first = r2;
1051 else
1052 last->next = r2;
1053 ira_assert (r2->next == NULL);
1055 else
1057 ira_assert (last->next == NULL);
1059 return first;
1062 /* Return TRUE if live ranges R1 and R2 intersect. */
1063 bool
1064 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1066 /* Remember the live ranges are always kept ordered. */
1067 while (r1 != NULL && r2 != NULL)
1069 if (r1->start > r2->finish)
1070 r1 = r1->next;
1071 else if (r2->start > r1->finish)
1072 r2 = r2->next;
1073 else
1074 return true;
1076 return false;
1079 /* Free allocno live range R. */
1080 void
1081 ira_finish_live_range (live_range_t r)
1083 live_range_pool.remove (r);
1086 /* Free list of allocno live ranges starting with R. */
1087 void
1088 ira_finish_live_range_list (live_range_t r)
1090 live_range_t next_r;
1092 for (; r != NULL; r = next_r)
1094 next_r = r->next;
1095 ira_finish_live_range (r);
1099 /* Free updated register costs of allocno A. */
1100 void
1101 ira_free_allocno_updated_costs (ira_allocno_t a)
1103 enum reg_class aclass;
1105 aclass = ALLOCNO_CLASS (a);
1106 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1107 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1108 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1109 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1110 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1111 aclass);
1112 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1115 /* Free and nullify all cost vectors allocated earlier for allocno
1116 A. */
1117 static void
1118 ira_free_allocno_costs (ira_allocno_t a)
1120 enum reg_class aclass = ALLOCNO_CLASS (a);
1121 ira_object_t obj;
1122 ira_allocno_object_iterator oi;
1124 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
1126 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
1127 ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
1128 if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
1129 ira_free (OBJECT_CONFLICT_ARRAY (obj));
1130 object_pool.remove (obj);
1133 ira_allocnos[ALLOCNO_NUM (a)] = NULL;
1134 if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1135 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
1136 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
1137 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
1138 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1139 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1140 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1141 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1142 aclass);
1143 ALLOCNO_HARD_REG_COSTS (a) = NULL;
1144 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
1145 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1146 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1149 /* Free the memory allocated for allocno A. */
1150 static void
1151 finish_allocno (ira_allocno_t a)
1153 ira_free_allocno_costs (a);
1154 allocno_pool.remove (a);
1157 /* Free the memory allocated for all allocnos. */
1158 static void
1159 finish_allocnos (void)
1161 ira_allocno_t a;
1162 ira_allocno_iterator ai;
1164 FOR_EACH_ALLOCNO (a, ai)
1165 finish_allocno (a);
1166 ira_free (ira_regno_allocno_map);
1167 ira_object_id_map_vec.release ();
1168 allocno_vec.release ();
1169 allocno_pool.release ();
1170 object_pool.release ();
1171 live_range_pool.release ();
1176 /* Pools for allocno preferences. */
1177 static pool_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
1179 /* Vec containing references to all created preferences. It is a
1180 container of array ira_prefs. */
1181 static vec<ira_pref_t> pref_vec;
1183 /* The function initializes data concerning allocno prefs. */
1184 static void
1185 initiate_prefs (void)
1187 pref_vec.create (get_max_uid ());
1188 ira_prefs = NULL;
1189 ira_prefs_num = 0;
1192 /* Return pref for A and HARD_REGNO if any. */
1193 static ira_pref_t
1194 find_allocno_pref (ira_allocno_t a, int hard_regno)
1196 ira_pref_t pref;
1198 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1199 if (pref->allocno == a && pref->hard_regno == hard_regno)
1200 return pref;
1201 return NULL;
1204 /* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1205 ira_pref_t
1206 ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
1208 ira_pref_t pref;
1210 pref = pref_pool.allocate ();
1211 pref->num = ira_prefs_num;
1212 pref->allocno = a;
1213 pref->hard_regno = hard_regno;
1214 pref->freq = freq;
1215 pref_vec.safe_push (pref);
1216 ira_prefs = pref_vec.address ();
1217 ira_prefs_num = pref_vec.length ();
1218 return pref;
1221 /* Attach a pref PREF to the corresponding allocno. */
1222 static void
1223 add_allocno_pref_to_list (ira_pref_t pref)
1225 ira_allocno_t a = pref->allocno;
1227 pref->next_pref = ALLOCNO_PREFS (a);
1228 ALLOCNO_PREFS (a) = pref;
1231 /* Create (or update frequency if the pref already exists) the pref of
1232 allocnos A preferring HARD_REGNO with frequency FREQ. */
1233 void
1234 ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
1236 ira_pref_t pref;
1238 if (freq <= 0)
1239 return;
1240 if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
1242 pref->freq += freq;
1243 return;
1245 pref = ira_create_pref (a, hard_regno, freq);
1246 ira_assert (a != NULL);
1247 add_allocno_pref_to_list (pref);
1250 /* Print info about PREF into file F. */
1251 static void
1252 print_pref (FILE *f, ira_pref_t pref)
1254 fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
1255 ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
1256 pref->hard_regno, pref->freq);
1259 /* Print info about PREF into stderr. */
1260 void
1261 ira_debug_pref (ira_pref_t pref)
1263 print_pref (stderr, pref);
1266 /* Print info about all prefs into file F. */
1267 static void
1268 print_prefs (FILE *f)
1270 ira_pref_t pref;
1271 ira_pref_iterator pi;
1273 FOR_EACH_PREF (pref, pi)
1274 print_pref (f, pref);
1277 /* Print info about all prefs into stderr. */
1278 void
1279 ira_debug_prefs (void)
1281 print_prefs (stderr);
1284 /* Print info about prefs involving allocno A into file F. */
1285 static void
1286 print_allocno_prefs (FILE *f, ira_allocno_t a)
1288 ira_pref_t pref;
1290 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1291 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1292 fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
1293 fprintf (f, "\n");
1296 /* Print info about prefs involving allocno A into stderr. */
1297 void
1298 ira_debug_allocno_prefs (ira_allocno_t a)
1300 print_allocno_prefs (stderr, a);
1303 /* The function frees memory allocated for PREF. */
1304 static void
1305 finish_pref (ira_pref_t pref)
1307 ira_prefs[pref->num] = NULL;
1308 pref_pool.remove (pref);
1311 /* Remove PREF from the list of allocno prefs and free memory for
1312 it. */
1313 void
1314 ira_remove_pref (ira_pref_t pref)
1316 ira_pref_t cpref, prev;
1318 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1319 fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
1320 pref->num, pref->hard_regno, pref->freq);
1321 for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
1322 cpref != NULL;
1323 prev = cpref, cpref = cpref->next_pref)
1324 if (cpref == pref)
1325 break;
1326 ira_assert (cpref != NULL);
1327 if (prev == NULL)
1328 ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
1329 else
1330 prev->next_pref = pref->next_pref;
1331 finish_pref (pref);
1334 /* Remove all prefs of allocno A. */
1335 void
1336 ira_remove_allocno_prefs (ira_allocno_t a)
1338 ira_pref_t pref, next_pref;
1340 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
1342 next_pref = pref->next_pref;
1343 finish_pref (pref);
1345 ALLOCNO_PREFS (a) = NULL;
1348 /* Free memory allocated for all prefs. */
1349 static void
1350 finish_prefs (void)
1352 ira_pref_t pref;
1353 ira_pref_iterator pi;
1355 FOR_EACH_PREF (pref, pi)
1356 finish_pref (pref);
1357 pref_vec.release ();
1358 pref_pool.release ();
1363 /* Pools for copies. */
1364 static pool_allocator<ira_allocno_copy> copy_pool ("copies", 100);
1366 /* Vec containing references to all created copies. It is a
1367 container of array ira_copies. */
1368 static vec<ira_copy_t> copy_vec;
1370 /* The function initializes data concerning allocno copies. */
1371 static void
1372 initiate_copies (void)
1374 copy_vec.create (get_max_uid ());
1375 ira_copies = NULL;
1376 ira_copies_num = 0;
1379 /* Return copy connecting A1 and A2 and originated from INSN of
1380 LOOP_TREE_NODE if any. */
1381 static ira_copy_t
1382 find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
1383 ira_loop_tree_node_t loop_tree_node)
1385 ira_copy_t cp, next_cp;
1386 ira_allocno_t another_a;
1388 for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1390 if (cp->first == a1)
1392 next_cp = cp->next_first_allocno_copy;
1393 another_a = cp->second;
1395 else if (cp->second == a1)
1397 next_cp = cp->next_second_allocno_copy;
1398 another_a = cp->first;
1400 else
1401 gcc_unreachable ();
1402 if (another_a == a2 && cp->insn == insn
1403 && cp->loop_tree_node == loop_tree_node)
1404 return cp;
1406 return NULL;
1409 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1410 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1411 ira_copy_t
1412 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1413 bool constraint_p, rtx_insn *insn,
1414 ira_loop_tree_node_t loop_tree_node)
1416 ira_copy_t cp;
1418 cp = copy_pool.allocate ();
1419 cp->num = ira_copies_num;
1420 cp->first = first;
1421 cp->second = second;
1422 cp->freq = freq;
1423 cp->constraint_p = constraint_p;
1424 cp->insn = insn;
1425 cp->loop_tree_node = loop_tree_node;
1426 copy_vec.safe_push (cp);
1427 ira_copies = copy_vec.address ();
1428 ira_copies_num = copy_vec.length ();
1429 return cp;
1432 /* Attach a copy CP to allocnos involved into the copy. */
1433 static void
1434 add_allocno_copy_to_list (ira_copy_t cp)
1436 ira_allocno_t first = cp->first, second = cp->second;
1438 cp->prev_first_allocno_copy = NULL;
1439 cp->prev_second_allocno_copy = NULL;
1440 cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
1441 if (cp->next_first_allocno_copy != NULL)
1443 if (cp->next_first_allocno_copy->first == first)
1444 cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
1445 else
1446 cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
1448 cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
1449 if (cp->next_second_allocno_copy != NULL)
1451 if (cp->next_second_allocno_copy->second == second)
1452 cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
1453 else
1454 cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
1456 ALLOCNO_COPIES (first) = cp;
1457 ALLOCNO_COPIES (second) = cp;
1460 /* Make a copy CP a canonical copy where number of the
1461 first allocno is less than the second one. */
1462 static void
1463 swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1465 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1466 return;
1468 std::swap (cp->first, cp->second);
1469 std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
1470 std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
1473 /* Create (or update frequency if the copy already exists) and return
1474 the copy of allocnos FIRST and SECOND with frequency FREQ
1475 corresponding to move insn INSN (if any) and originated from
1476 LOOP_TREE_NODE. */
1477 ira_copy_t
1478 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1479 bool constraint_p, rtx_insn *insn,
1480 ira_loop_tree_node_t loop_tree_node)
1482 ira_copy_t cp;
1484 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1486 cp->freq += freq;
1487 return cp;
1489 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1490 loop_tree_node);
1491 ira_assert (first != NULL && second != NULL);
1492 add_allocno_copy_to_list (cp);
1493 swap_allocno_copy_ends_if_necessary (cp);
1494 return cp;
1497 /* Print info about copy CP into file F. */
1498 static void
1499 print_copy (FILE *f, ira_copy_t cp)
1501 fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
1502 ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1503 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1504 cp->insn != NULL
1505 ? "move" : cp->constraint_p ? "constraint" : "shuffle");
1508 DEBUG_FUNCTION void
1509 debug (ira_allocno_copy &ref)
1511 print_copy (stderr, &ref);
1514 DEBUG_FUNCTION void
1515 debug (ira_allocno_copy *ptr)
1517 if (ptr)
1518 debug (*ptr);
1519 else
1520 fprintf (stderr, "<nil>\n");
1523 /* Print info about copy CP into stderr. */
1524 void
1525 ira_debug_copy (ira_copy_t cp)
1527 print_copy (stderr, cp);
1530 /* Print info about all copies into file F. */
1531 static void
1532 print_copies (FILE *f)
1534 ira_copy_t cp;
1535 ira_copy_iterator ci;
1537 FOR_EACH_COPY (cp, ci)
1538 print_copy (f, cp);
1541 /* Print info about all copies into stderr. */
1542 void
1543 ira_debug_copies (void)
1545 print_copies (stderr);
1548 /* Print info about copies involving allocno A into file F. */
1549 static void
1550 print_allocno_copies (FILE *f, ira_allocno_t a)
1552 ira_allocno_t another_a;
1553 ira_copy_t cp, next_cp;
1555 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1556 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1558 if (cp->first == a)
1560 next_cp = cp->next_first_allocno_copy;
1561 another_a = cp->second;
1563 else if (cp->second == a)
1565 next_cp = cp->next_second_allocno_copy;
1566 another_a = cp->first;
1568 else
1569 gcc_unreachable ();
1570 fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
1571 ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
1573 fprintf (f, "\n");
1576 DEBUG_FUNCTION void
1577 debug (ira_allocno &ref)
1579 print_allocno_copies (stderr, &ref);
1582 DEBUG_FUNCTION void
1583 debug (ira_allocno *ptr)
1585 if (ptr)
1586 debug (*ptr);
1587 else
1588 fprintf (stderr, "<nil>\n");
1592 /* Print info about copies involving allocno A into stderr. */
1593 void
1594 ira_debug_allocno_copies (ira_allocno_t a)
1596 print_allocno_copies (stderr, a);
1599 /* The function frees memory allocated for copy CP. */
1600 static void
1601 finish_copy (ira_copy_t cp)
1603 copy_pool.remove (cp);
1607 /* Free memory allocated for all copies. */
1608 static void
1609 finish_copies (void)
1611 ira_copy_t cp;
1612 ira_copy_iterator ci;
1614 FOR_EACH_COPY (cp, ci)
1615 finish_copy (cp);
1616 copy_vec.release ();
1617 copy_pool.release ();
1622 /* Pools for cost vectors. It is defined only for allocno classes. */
1623 static pool_allocator<int> * cost_vector_pool[N_REG_CLASSES];
1625 /* The function initiates work with hard register cost vectors. It
1626 creates allocation pool for each allocno class. */
1627 static void
1628 initiate_cost_vectors (void)
1630 int i;
1631 enum reg_class aclass;
1633 for (i = 0; i < ira_allocno_classes_num; i++)
1635 aclass = ira_allocno_classes[i];
1636 cost_vector_pool[aclass] = new pool_allocator<int>
1637 ("cost vectors", 100,
1638 sizeof (int) * (ira_class_hard_regs_num[aclass] - 1));
1642 /* Allocate and return a cost vector VEC for ACLASS. */
1643 int *
1644 ira_allocate_cost_vector (reg_class_t aclass)
1646 return cost_vector_pool[(int) aclass]->allocate ();
1649 /* Free a cost vector VEC for ACLASS. */
1650 void
1651 ira_free_cost_vector (int *vec, reg_class_t aclass)
1653 ira_assert (vec != NULL);
1654 cost_vector_pool[(int) aclass]->remove (vec);
1657 /* Finish work with hard register cost vectors. Release allocation
1658 pool for each allocno class. */
1659 static void
1660 finish_cost_vectors (void)
1662 int i;
1663 enum reg_class aclass;
1665 for (i = 0; i < ira_allocno_classes_num; i++)
1667 aclass = ira_allocno_classes[i];
1668 delete cost_vector_pool[aclass];
1674 /* Compute a post-ordering of the reverse control flow of the loop body
1675 designated by the children nodes of LOOP_NODE, whose body nodes in
1676 pre-order are input as LOOP_PREORDER. Return a VEC with a post-order
1677 of the reverse loop body.
1679 For the post-order of the reverse CFG, we visit the basic blocks in
1680 LOOP_PREORDER array in the reverse order of where they appear.
1681 This is important: We do not just want to compute a post-order of
1682 the reverse CFG, we want to make a best-guess for a visiting order that
1683 minimizes the number of chain elements per allocno live range. If the
1684 blocks would be visited in a different order, we would still compute a
1685 correct post-ordering but it would be less likely that two nodes
1686 connected by an edge in the CFG are neighbours in the topsort. */
1688 static vec<ira_loop_tree_node_t>
1689 ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
1690 vec<ira_loop_tree_node_t> loop_preorder)
1692 vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
1693 unsigned int n_loop_preorder;
1695 n_loop_preorder = loop_preorder.length ();
1696 if (n_loop_preorder != 0)
1698 ira_loop_tree_node_t subloop_node;
1699 unsigned int i;
1700 auto_vec<ira_loop_tree_node_t> dfs_stack;
1702 /* This is a bit of strange abuse of the BB_VISITED flag: We use
1703 the flag to mark blocks we still have to visit to add them to
1704 our post-order. Define an alias to avoid confusion. */
1705 #define BB_TO_VISIT BB_VISITED
1707 FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
1709 gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
1710 subloop_node->bb->flags |= BB_TO_VISIT;
1713 topsort_nodes.create (n_loop_preorder);
1714 dfs_stack.create (n_loop_preorder);
1716 FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
1718 if (! (subloop_node->bb->flags & BB_TO_VISIT))
1719 continue;
1721 subloop_node->bb->flags &= ~BB_TO_VISIT;
1722 dfs_stack.quick_push (subloop_node);
1723 while (! dfs_stack.is_empty ())
1725 edge e;
1726 edge_iterator ei;
1728 ira_loop_tree_node_t n = dfs_stack.last ();
1729 FOR_EACH_EDGE (e, ei, n->bb->preds)
1731 ira_loop_tree_node_t pred_node;
1732 basic_block pred_bb = e->src;
1734 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1735 continue;
1737 pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
1738 if (pred_node != n
1739 && (pred_node->bb->flags & BB_TO_VISIT))
1741 pred_node->bb->flags &= ~BB_TO_VISIT;
1742 dfs_stack.quick_push (pred_node);
1745 if (n == dfs_stack.last ())
1747 dfs_stack.pop ();
1748 topsort_nodes.quick_push (n);
1753 #undef BB_TO_VISIT
1756 gcc_assert (topsort_nodes.length () == n_loop_preorder);
1757 return topsort_nodes;
1760 /* The current loop tree node and its regno allocno map. */
1761 ira_loop_tree_node_t ira_curr_loop_tree_node;
1762 ira_allocno_t *ira_curr_regno_allocno_map;
1764 /* This recursive function traverses loop tree with root LOOP_NODE
1765 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1766 correspondingly in preorder and postorder. The function sets up
1767 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1768 basic block nodes of LOOP_NODE is also processed (before its
1769 subloop nodes).
1771 If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1772 the loop are passed in the *reverse* post-order of the *reverse*
1773 CFG. This is only used by ira_create_allocno_live_ranges, which
1774 wants to visit basic blocks in this order to minimize the number
1775 of elements per live range chain.
1776 Note that the loop tree nodes are still visited in the normal,
1777 forward post-order of the loop tree. */
1779 void
1780 ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
1781 void (*preorder_func) (ira_loop_tree_node_t),
1782 void (*postorder_func) (ira_loop_tree_node_t))
1784 ira_loop_tree_node_t subloop_node;
1786 ira_assert (loop_node->bb == NULL);
1787 ira_curr_loop_tree_node = loop_node;
1788 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1790 if (preorder_func != NULL)
1791 (*preorder_func) (loop_node);
1793 if (bb_p)
1795 auto_vec<ira_loop_tree_node_t> loop_preorder;
1796 unsigned int i;
1798 /* Add all nodes to the set of nodes to visit. The IRA loop tree
1799 is set up such that nodes in the loop body appear in a pre-order
1800 of their place in the CFG. */
1801 for (subloop_node = loop_node->children;
1802 subloop_node != NULL;
1803 subloop_node = subloop_node->next)
1804 if (subloop_node->bb != NULL)
1805 loop_preorder.safe_push (subloop_node);
1807 if (preorder_func != NULL)
1808 FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
1809 (*preorder_func) (subloop_node);
1811 if (postorder_func != NULL)
1813 vec<ira_loop_tree_node_t> loop_rev_postorder =
1814 ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
1815 FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
1816 (*postorder_func) (subloop_node);
1817 loop_rev_postorder.release ();
1821 for (subloop_node = loop_node->subloops;
1822 subloop_node != NULL;
1823 subloop_node = subloop_node->subloop_next)
1825 ira_assert (subloop_node->bb == NULL);
1826 ira_traverse_loop_tree (bb_p, subloop_node,
1827 preorder_func, postorder_func);
1830 ira_curr_loop_tree_node = loop_node;
1831 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1833 if (postorder_func != NULL)
1834 (*postorder_func) (loop_node);
1839 /* The basic block currently being processed. */
1840 static basic_block curr_bb;
1842 /* This recursive function creates allocnos corresponding to
1843 pseudo-registers containing in X. True OUTPUT_P means that X is
1844 an lvalue. PARENT corresponds to the parent expression of X. */
1845 static void
1846 create_insn_allocnos (rtx x, rtx outer, bool output_p)
1848 int i, j;
1849 const char *fmt;
1850 enum rtx_code code = GET_CODE (x);
1852 if (code == REG)
1854 int regno;
1856 if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1858 ira_allocno_t a;
1860 if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1862 a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1863 if (outer != NULL && GET_CODE (outer) == SUBREG)
1865 machine_mode wmode = GET_MODE (outer);
1866 if (GET_MODE_SIZE (wmode) > GET_MODE_SIZE (ALLOCNO_WMODE (a)))
1867 ALLOCNO_WMODE (a) = wmode;
1871 ALLOCNO_NREFS (a)++;
1872 ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1873 if (output_p)
1874 bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1876 return;
1878 else if (code == SET)
1880 create_insn_allocnos (SET_DEST (x), NULL, true);
1881 create_insn_allocnos (SET_SRC (x), NULL, false);
1882 return;
1884 else if (code == CLOBBER)
1886 create_insn_allocnos (XEXP (x, 0), NULL, true);
1887 return;
1889 else if (code == MEM)
1891 create_insn_allocnos (XEXP (x, 0), NULL, false);
1892 return;
1894 else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
1895 code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1897 create_insn_allocnos (XEXP (x, 0), NULL, true);
1898 create_insn_allocnos (XEXP (x, 0), NULL, false);
1899 return;
1902 fmt = GET_RTX_FORMAT (code);
1903 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1905 if (fmt[i] == 'e')
1906 create_insn_allocnos (XEXP (x, i), x, output_p);
1907 else if (fmt[i] == 'E')
1908 for (j = 0; j < XVECLEN (x, i); j++)
1909 create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
1913 /* Create allocnos corresponding to pseudo-registers living in the
1914 basic block represented by the corresponding loop tree node
1915 BB_NODE. */
1916 static void
1917 create_bb_allocnos (ira_loop_tree_node_t bb_node)
1919 basic_block bb;
1920 rtx_insn *insn;
1921 unsigned int i;
1922 bitmap_iterator bi;
1924 curr_bb = bb = bb_node->bb;
1925 ira_assert (bb != NULL);
1926 FOR_BB_INSNS_REVERSE (bb, insn)
1927 if (NONDEBUG_INSN_P (insn))
1928 create_insn_allocnos (PATTERN (insn), NULL, false);
1929 /* It might be a allocno living through from one subloop to
1930 another. */
1931 EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
1932 if (ira_curr_regno_allocno_map[i] == NULL)
1933 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1936 /* Create allocnos corresponding to pseudo-registers living on edge E
1937 (a loop entry or exit). Also mark the allocnos as living on the
1938 loop border. */
1939 static void
1940 create_loop_allocnos (edge e)
1942 unsigned int i;
1943 bitmap live_in_regs, border_allocnos;
1944 bitmap_iterator bi;
1945 ira_loop_tree_node_t parent;
1947 live_in_regs = df_get_live_in (e->dest);
1948 border_allocnos = ira_curr_loop_tree_node->border_allocnos;
1949 EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
1950 FIRST_PSEUDO_REGISTER, i, bi)
1951 if (bitmap_bit_p (live_in_regs, i))
1953 if (ira_curr_regno_allocno_map[i] == NULL)
1955 /* The order of creations is important for right
1956 ira_regno_allocno_map. */
1957 if ((parent = ira_curr_loop_tree_node->parent) != NULL
1958 && parent->regno_allocno_map[i] == NULL)
1959 ira_create_allocno (i, false, parent);
1960 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1962 bitmap_set_bit (border_allocnos,
1963 ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1967 /* Create allocnos corresponding to pseudo-registers living in loop
1968 represented by the corresponding loop tree node LOOP_NODE. This
1969 function is called by ira_traverse_loop_tree. */
1970 static void
1971 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
1973 if (loop_node->bb != NULL)
1974 create_bb_allocnos (loop_node);
1975 else if (loop_node != ira_loop_tree_root)
1977 int i;
1978 edge_iterator ei;
1979 edge e;
1980 vec<edge> edges;
1982 ira_assert (current_loops != NULL);
1983 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
1984 if (e->src != loop_node->loop->latch)
1985 create_loop_allocnos (e);
1987 edges = get_loop_exit_edges (loop_node->loop);
1988 FOR_EACH_VEC_ELT (edges, i, e)
1989 create_loop_allocnos (e);
1990 edges.release ();
1994 /* Propagate information about allocnos modified inside the loop given
1995 by its LOOP_TREE_NODE to its parent. */
1996 static void
1997 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1999 if (loop_tree_node == ira_loop_tree_root)
2000 return;
2001 ira_assert (loop_tree_node->bb == NULL);
2002 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
2003 loop_tree_node->modified_regnos);
2006 /* Propagate new info about allocno A (see comments about accumulated
2007 info in allocno definition) to the corresponding allocno on upper
2008 loop tree level. So allocnos on upper levels accumulate
2009 information about the corresponding allocnos in nested regions.
2010 The new info means allocno info finally calculated in this
2011 file. */
2012 static void
2013 propagate_allocno_info (void)
2015 int i;
2016 ira_allocno_t a, parent_a;
2017 ira_loop_tree_node_t parent;
2018 enum reg_class aclass;
2020 if (flag_ira_region != IRA_REGION_ALL
2021 && flag_ira_region != IRA_REGION_MIXED)
2022 return;
2023 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2024 for (a = ira_regno_allocno_map[i];
2025 a != NULL;
2026 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2027 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
2028 && (parent_a = parent->regno_allocno_map[i]) != NULL
2029 /* There are no caps yet at this point. So use
2030 border_allocnos to find allocnos for the propagation. */
2031 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
2032 ALLOCNO_NUM (a)))
2034 if (! ALLOCNO_BAD_SPILL_P (a))
2035 ALLOCNO_BAD_SPILL_P (parent_a) = false;
2036 ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
2037 ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
2038 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2039 merge_hard_reg_conflicts (a, parent_a, true);
2040 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2041 += ALLOCNO_CALLS_CROSSED_NUM (a);
2042 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2043 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
2044 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
2045 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2046 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2047 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2048 aclass = ALLOCNO_CLASS (a);
2049 ira_assert (aclass == ALLOCNO_CLASS (parent_a));
2050 ira_allocate_and_accumulate_costs
2051 (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
2052 ALLOCNO_HARD_REG_COSTS (a));
2053 ira_allocate_and_accumulate_costs
2054 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
2055 aclass,
2056 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
2057 ALLOCNO_CLASS_COST (parent_a)
2058 += ALLOCNO_CLASS_COST (a);
2059 ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
2063 /* Create allocnos corresponding to pseudo-registers in the current
2064 function. Traverse the loop tree for this. */
2065 static void
2066 create_allocnos (void)
2068 /* We need to process BB first to correctly link allocnos by member
2069 next_regno_allocno. */
2070 ira_traverse_loop_tree (true, ira_loop_tree_root,
2071 create_loop_tree_node_allocnos, NULL);
2072 if (optimize)
2073 ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
2074 propagate_modified_regnos);
2079 /* The page contains function to remove some regions from a separate
2080 register allocation. We remove regions whose separate allocation
2081 will hardly improve the result. As a result we speed up regional
2082 register allocation. */
2084 /* The function changes the object in range list given by R to OBJ. */
2085 static void
2086 change_object_in_range_list (live_range_t r, ira_object_t obj)
2088 for (; r != NULL; r = r->next)
2089 r->object = obj;
2092 /* Move all live ranges associated with allocno FROM to allocno TO. */
2093 static void
2094 move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2096 int i;
2097 int n = ALLOCNO_NUM_OBJECTS (from);
2099 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2101 for (i = 0; i < n; i++)
2103 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2104 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2105 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2107 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2109 fprintf (ira_dump_file,
2110 " Moving ranges of a%dr%d to a%dr%d: ",
2111 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2112 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2113 ira_print_live_range_list (ira_dump_file, lr);
2115 change_object_in_range_list (lr, to_obj);
2116 OBJECT_LIVE_RANGES (to_obj)
2117 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2118 OBJECT_LIVE_RANGES (from_obj) = NULL;
2122 static void
2123 copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2125 int i;
2126 int n = ALLOCNO_NUM_OBJECTS (from);
2128 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2130 for (i = 0; i < n; i++)
2132 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2133 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2134 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2136 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2138 fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
2139 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2140 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2141 ira_print_live_range_list (ira_dump_file, lr);
2143 lr = ira_copy_live_range_list (lr);
2144 change_object_in_range_list (lr, to_obj);
2145 OBJECT_LIVE_RANGES (to_obj)
2146 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2150 /* Return TRUE if NODE represents a loop with low register
2151 pressure. */
2152 static bool
2153 low_pressure_loop_node_p (ira_loop_tree_node_t node)
2155 int i;
2156 enum reg_class pclass;
2158 if (node->bb != NULL)
2159 return false;
2161 for (i = 0; i < ira_pressure_classes_num; i++)
2163 pclass = ira_pressure_classes[i];
2164 if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2165 && ira_class_hard_regs_num[pclass] > 1)
2166 return false;
2168 return true;
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such a loop if the target uses stack registers
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res = false;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
#endif
2200 /* Sort loops for marking them for removal. We put already marked
2201 loops first, then less frequent loops next, and then outer loops
2202 next. */
2203 static int
2204 loop_compare_func (const void *v1p, const void *v2p)
2206 int diff;
2207 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2208 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2210 ira_assert (l1->parent != NULL && l2->parent != NULL);
2211 if (l1->to_remove_p && ! l2->to_remove_p)
2212 return -1;
2213 if (! l1->to_remove_p && l2->to_remove_p)
2214 return 1;
2215 if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
2216 return diff;
2217 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2218 return diff;
2219 /* Make sorting stable. */
2220 return l1->loop_num - l2->loop_num;
2223 /* Mark loops which should be removed from regional allocation. We
2224 remove a loop with low register pressure inside another loop with
2225 register pressure. In this case a separate allocation of the loop
2226 hardly helps (for irregular register file architecture it could
2227 help by choosing a better hard register in the loop but we prefer
2228 faster allocation even in this case). We also remove cheap loops
2229 if there are more than IRA_MAX_LOOPS_NUM of them. Loop with EH
2230 exit or enter edges are removed too because the allocation might
2231 require put pseudo moves on the EH edges (we could still do this
2232 for pseudos with caller saved hard registers in some cases but it
2233 is impossible to say here or during top-down allocation pass what
2234 hard register the pseudos get finally). */
2235 static void
2236 mark_loops_for_removal (void)
2238 int i, n;
2239 ira_loop_tree_node_t *sorted_loops;
2240 loop_p loop;
2242 ira_assert (current_loops != NULL);
/* Worst case: every loop of the function gets an entry.  */
2243 sorted_loops
2244 = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
2245 * number_of_loops (cfun));
/* Collect the non-root loop nodes and compute each node's initial
   removal mark.  */
2246 for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
2247 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2249 if (ira_loop_nodes[i].parent == NULL)
2251 /* Don't remove the root. */
2252 ira_loop_nodes[i].to_remove_p = false;
2253 continue;
2255 sorted_loops[n++] = &ira_loop_nodes[i];
/* A loop is removable when both it and its parent have low register
   pressure (a separate region would not help), or -- on stack-reg
   targets -- when it has complex enter/exit edges.  */
2256 ira_loop_nodes[i].to_remove_p
2257 = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
2258 && low_pressure_loop_node_p (&ira_loop_nodes[i]))
2259 #ifdef STACK_REGS
2260 || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
2261 #endif
/* Sort marked, then less frequent, then outer loops to the front and
   force-mark from the front until at most IRA_MAX_LOOPS_NUM regions
   (including the root) remain.  */
2264 qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
2265 for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
2267 sorted_loops[i]->to_remove_p = true;
2268 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2269 fprintf
2270 (ira_dump_file,
2271 "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2272 sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
2273 sorted_loops[i]->loop->header->frequency,
2274 loop_depth (sorted_loops[i]->loop),
2275 low_pressure_loop_node_p (sorted_loops[i]->parent)
2276 && low_pressure_loop_node_p (sorted_loops[i])
2277 ? "low pressure" : "cheap loop");
2279 ira_free (sorted_loops);
2282 /* Mark all loops but root for removing. */
2283 static void
2284 mark_all_loops_for_removal (void)
2286 int i;
2287 loop_p loop;
2289 ira_assert (current_loops != NULL);
2290 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
/* Only loops represented in the IRA loop tree (non-NULL regno map)
   are considered.  */
2291 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2293 if (ira_loop_nodes[i].parent == NULL)
2295 /* Don't remove the root. */
2296 ira_loop_nodes[i].to_remove_p = false;
2297 continue;
2299 ira_loop_nodes[i].to_remove_p = true;
2300 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2301 fprintf
2302 (ira_dump_file,
2303 "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2304 ira_loop_nodes[i].loop_num,
2305 ira_loop_nodes[i].loop->header->index,
2306 ira_loop_nodes[i].loop->header->frequency,
2307 loop_depth (ira_loop_nodes[i].loop));
2311 /* Definition of vector of loop tree nodes. */
2313 /* Vec containing references to all removed loop tree nodes.  Filled
   by remove_uneccesary_loop_nodes_from_loop_tree; the nodes are
   finalized later (finish_loop_tree_node) by the caller.  */
2314 static vec<ira_loop_tree_node_t> removed_loop_vec;
2316 /* Vec containing references to all children of loop tree nodes.
   Used as a scratch stack while rebuilding the children lists.  */
2317 static vec<ira_loop_tree_node_t> children_vec;
2319 /* Remove subregions of NODE if their separate allocation will not
2320 improve the result. */
2321 static void
2322 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
2324 unsigned int start;
2325 bool remove_p;
2326 ira_loop_tree_node_t subnode;
2328 remove_p = node->to_remove_p;
/* A surviving node pushes itself so its nearest kept ancestor can
   adopt it; children_vec serves as an explicit stack across the
   recursion.  */
2329 if (! remove_p)
2330 children_vec.safe_push (node);
/* Everything pushed past START after the walk is a surviving child of
   NODE (including children bubbled up from removed subloops).  */
2331 start = children_vec.length ();
2332 for (subnode = node->children; subnode != NULL; subnode = subnode->next)
2333 if (subnode->bb == NULL)
2334 remove_uneccesary_loop_nodes_from_loop_tree (subnode);
2335 else
2336 children_vec.safe_push (subnode);
2337 node->children = node->subloops = NULL;
2338 if (remove_p)
/* NODE itself goes away; its surviving children remain on the stack
   for the nearest kept ancestor.  Finalization of removed nodes is
   deferred via removed_loop_vec.  */
2340 removed_loop_vec.safe_push (node);
2341 return;
/* Re-link the surviving children (and, for loop nodes, the subloop
   chain) under NODE.  */
2343 while (children_vec.length () > start)
2345 subnode = children_vec.pop ();
2346 subnode->parent = node;
2347 subnode->next = node->children;
2348 node->children = subnode;
2349 if (subnode->bb == NULL)
2351 subnode->subloop_next = node->subloops;
2352 node->subloops = subnode;
2357 /* Return TRUE if NODE is inside PARENT. */
2358 static bool
2359 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2361 for (node = node->parent; node != NULL; node = node->parent)
2362 if (node == parent)
2363 return true;
2364 return false;
2367 /* Sort allocnos according to their order in regno allocno list. */
2368 static int
2369 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2371 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2372 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2373 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2374 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2376 if (loop_is_inside_p (n1, n2))
2377 return -1;
2378 else if (loop_is_inside_p (n2, n1))
2379 return 1;
2380 /* If allocnos are equally good, sort by allocno numbers, so that
2381 the results of qsort leave nothing to chance. We put allocnos
2382 with higher number first in the list because it is the original
2383 order for allocnos from loops on the same levels. */
2384 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2387 /* This array is used to sort allocnos to restore allocno order in
2388 the regno allocno list.  Allocated lazily (sized by
   ira_allocnos_num) in remove_unnecessary_allocnos. */
2389 static ira_allocno_t *regno_allocnos;
2391 /* Restore allocno order for REGNO in the regno allocno list. */
2392 static void
2393 ira_rebuild_regno_allocno_list (int regno)
2395 int i, n;
2396 ira_allocno_t a;
/* Collect every allocno of REGNO into the scratch array ...  */
2398 for (n = 0, a = ira_regno_allocno_map[regno];
2399 a != NULL;
2400 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2401 regno_allocnos[n++] = a;
2402 ira_assert (n > 0);
/* ... sort them into loop-tree order ...  */
2403 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2404 regno_allocno_order_compare_func);
/* ... and rebuild the singly linked regno allocno list from the
   sorted array.  */
2405 for (i = 1; i < n; i++)
2406 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2407 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2408 ira_regno_allocno_map[regno] = regno_allocnos[0];
2409 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2410 fprintf (ira_dump_file, "  Rebuilding regno allocno list for %d\n", regno);
2413 /* Propagate info from allocno FROM_A to allocno A. */
2414 static void
2415 propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2417 enum reg_class aclass;
/* Union FROM_A's hard register conflicts into A ...  */
2419 merge_hard_reg_conflicts (from_a, a, false);
/* ... and accumulate the frequency/call statistics.  */
2420 ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2421 ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2422 ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2423 ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2424 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
2425 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
2426 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
2427 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));
2429 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2430 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
/* bad_spill_p is sticky-false: if spilling might help FROM_A, it
   might help the merged allocno too.  */
2431 if (! ALLOCNO_BAD_SPILL_P (from_a))
2432 ALLOCNO_BAD_SPILL_P (a) = false;
/* Costs can only be accumulated between allocnos of the same
   class.  */
2433 aclass = ALLOCNO_CLASS (from_a);
2434 ira_assert (aclass == ALLOCNO_CLASS (a));
2435 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2436 ALLOCNO_HARD_REG_COSTS (from_a));
2437 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2438 aclass,
2439 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2440 ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2441 ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2444 /* Remove allocnos from loops removed from the allocation
2445 consideration. */
2446 static void
2447 remove_unnecessary_allocnos (void)
2449 int regno;
2450 bool merged_p, rebuild_p;
2451 ira_allocno_t a, prev_a, next_a, parent_a;
2452 ira_loop_tree_node_t a_node, parent;
2454 merged_p = false;
2455 regno_allocnos = NULL;
2456 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
2458 rebuild_p = false;
/* Walk the regno allocno list keeping a trailing pointer so that
   nodes can be unlinked in place.  */
2459 for (prev_a = NULL, a = ira_regno_allocno_map[regno];
2460 a != NULL;
2461 a = next_a)
2463 next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
2464 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2465 if (! a_node->to_remove_p)
2466 prev_a = a;
2467 else
/* Find the nearest ancestor that either already has an allocno
   for REGNO or is itself kept.  */
2469 for (parent = a_node->parent;
2470 (parent_a = parent->regno_allocno_map[regno]) == NULL
2471 && parent->to_remove_p;
2472 parent = parent->parent)
2474 if (parent_a == NULL)
2476 /* There are no allocnos with the same regno in
2477 upper region -- just move the allocno to the
2478 upper region. */
2479 prev_a = a;
2480 ALLOCNO_LOOP_TREE_NODE (a) = parent;
2481 parent->regno_allocno_map[regno] = a;
2482 bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
/* Moving changes list order; remember to re-sort below.  */
2483 rebuild_p = true;
2485 else
2487 /* Remove the allocno and update info of allocno in
2488 the upper region. */
2489 if (prev_a == NULL)
2490 ira_regno_allocno_map[regno] = next_a;
2491 else
2492 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
2493 move_allocno_live_ranges (a, parent_a);
2494 merged_p = true;
2495 propagate_some_info_from_allocno (parent_a, a);
2496 /* Remove it from the corresponding regno allocno
2497 map to avoid info propagation of subsequent
2498 allocno into this already removed allocno. */
2499 a_node->regno_allocno_map[regno] = NULL;
2500 ira_remove_allocno_prefs (a);
2501 finish_allocno (a);
2505 if (rebuild_p)
2506 /* We need to restore the order in regno allocno list. */
/* The scratch array is allocated lazily, once, for all regnos.  */
2508 if (regno_allocnos == NULL)
2509 regno_allocnos
2510 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2511 * ira_allocnos_num);
2512 ira_rebuild_regno_allocno_list (regno);
/* Live-range moves invalidate the global start/finish chains.  */
2515 if (merged_p)
2516 ira_rebuild_start_finish_chains ();
2517 if (regno_allocnos != NULL)
2518 ira_free (regno_allocnos);
2521 /* Remove allocnos from all loops but the root. */
2522 static void
2523 remove_low_level_allocnos (void)
2525 int regno;
2526 bool merged_p, propagate_p;
2527 ira_allocno_t a, top_a;
2528 ira_loop_tree_node_t a_node, parent;
2529 ira_allocno_iterator ai;
2531 merged_p = false;
/* Pass 1: merge (or promote) every non-root, non-cap allocno into the
   root region's allocno for the same regno.  */
2532 FOR_EACH_ALLOCNO (a, ai)
2534 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2535 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2536 continue;
2537 regno = ALLOCNO_REGNO (a);
2538 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
/* No allocno for REGNO at the root yet -- promote this one.  */
2540 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2541 ira_loop_tree_root->regno_allocno_map[regno] = a;
2542 continue;
/* Propagate info only from allocnos whose immediate parent has no
   allocno for REGNO -- presumably to avoid accumulating the same
   info twice along a chain (TODO confirm).  */
2544 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2545 /* Remove the allocno and update info of allocno in the upper
2546 region. */
2547 move_allocno_live_ranges (a, top_a);
2548 merged_p = true;
2549 if (propagate_p)
2550 propagate_some_info_from_allocno (top_a, a);
/* Pass 2: consistency checks on the remaining non-root allocnos.  */
2552 FOR_EACH_ALLOCNO (a, ai)
2554 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2555 if (a_node == ira_loop_tree_root)
2556 continue;
2557 parent = a_node->parent;
2558 regno = ALLOCNO_REGNO (a);
2559 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2560 ira_assert (ALLOCNO_CAP (a) != NULL);
2561 else if (ALLOCNO_CAP (a) == NULL)
2562 ira_assert (parent->regno_allocno_map[regno] != NULL);
/* Pass 3: keep only the root representatives; finalize everything
   else.  */
2564 FOR_EACH_ALLOCNO (a, ai)
2566 regno = ALLOCNO_REGNO (a);
2567 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2569 ira_object_t obj;
2570 ira_allocno_object_iterator oi;
2572 ira_regno_allocno_map[regno] = a;
2573 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2574 ALLOCNO_CAP_MEMBER (a) = NULL;
/* With one region, the accumulated (total) conflicts become the
   plain conflicts.  */
2575 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2576 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2577 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2578 #ifdef STACK_REGS
2579 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2580 ALLOCNO_NO_STACK_REG_P (a) = true;
2581 #endif
2583 else
2585 ira_remove_allocno_prefs (a);
2586 finish_allocno (a);
/* Live-range moves invalidate the global start/finish chains.  */
2589 if (merged_p)
2590 ira_rebuild_start_finish_chains ();
2593 /* Remove loops from consideration. We remove all loops except for
2594 root if ALL_P or loops for which a separate allocation will not
2595 improve the result. We have to do this after allocno creation and
2596 their costs and allocno class evaluation because only after that
2597 the register pressure can be known and is calculated. */
2598 static void
2599 remove_unnecessary_regions (bool all_p)
2601 if (current_loops == NULL)
2602 return;
/* Step 1: decide which loop nodes to drop.  */
2603 if (all_p)
2604 mark_all_loops_for_removal ();
2605 else
2606 mark_loops_for_removal ();
/* Upper bound on tree nodes: one per basic block plus one per
   loop.  */
2607 children_vec.create (last_basic_block_for_fn (cfun)
2608 + number_of_loops (cfun));
2609 removed_loop_vec.create (last_basic_block_for_fn (cfun)
2610 + number_of_loops (cfun));
/* Step 2: restructure the loop tree, collecting removed nodes.  */
2611 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
2612 children_vec.release ();
/* Step 3: merge away the allocnos that lived in removed regions.  */
2613 if (all_p)
2614 remove_low_level_allocnos ();
2615 else
2616 remove_unnecessary_allocnos ();
/* Step 4: finalize the removed loop tree nodes.  */
2617 while (removed_loop_vec.length () > 0)
2618 finish_loop_tree_node (removed_loop_vec.pop ());
2619 removed_loop_vec.release ();
2624 /* At this point true value of allocno attribute bad_spill_p means
2625 that there is an insn where allocno occurs and where the allocno
2626 can not be used as memory. The function updates the attribute, now
2627 it can be true only for allocnos which can not be used as memory in
2628 an insn and in whose live ranges there is other allocno deaths.
2629 Spilling allocnos with true value will not improve the code because
2630 it will not make other allocnos colorable and additional reloads
2631 for the corresponding pseudo will be generated in reload pass for
2632 each insn it occurs.
2634 This is a trick mentioned in one classic article of Chaitin etc
2635 which is frequently omitted in other implementations of RA based on
2636 graph coloring. */
/* NOTE(review): the sentence above seems to be missing a negation:
   per the code below, the attribute stays TRUE precisely when NO
   allocno of the same class dies strictly inside the allocno's live
   ranges -- confirm against the original intent.  */
2637 static void
2638 update_bad_spill_attribute (void)
2640 int i;
2641 ira_allocno_t a;
2642 ira_allocno_iterator ai;
2643 ira_allocno_object_iterator aoi;
2644 ira_object_t obj;
2645 live_range_t r;
2646 enum reg_class aclass;
2647 bitmap_head dead_points[N_REG_CLASSES];
/* Collect, per allocno class, the program points where some live
   range of that class finishes (a "death").  */
2649 for (i = 0; i < ira_allocno_classes_num; i++)
2651 aclass = ira_allocno_classes[i];
2652 bitmap_initialize (&dead_points[aclass], &reg_obstack);
2654 FOR_EACH_ALLOCNO (a, ai)
2656 aclass = ALLOCNO_CLASS (a);
2657 if (aclass == NO_REGS)
2658 continue;
2659 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2660 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2661 bitmap_set_bit (&dead_points[aclass], r->finish);
/* Clear bad_spill_p for every allocno that has a death of its class
   strictly inside one of its ranges -- spilling it could then free a
   register where pressure changes.  */
2663 FOR_EACH_ALLOCNO (a, ai)
2665 aclass = ALLOCNO_CLASS (a);
2666 if (aclass == NO_REGS)
2667 continue;
2668 if (! ALLOCNO_BAD_SPILL_P (a))
2669 continue;
2670 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2672 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
/* Scan the interior points of the range (endpoints excluded).  */
2674 for (i = r->start + 1; i < r->finish; i++)
2675 if (bitmap_bit_p (&dead_points[aclass], i))
2676 break;
/* Early loop exit means a death was found in this range.  */
2677 if (i < r->finish)
2678 break;
2680 if (r != NULL)
2682 ALLOCNO_BAD_SPILL_P (a) = false;
2683 break;
/* Release the per-class bitmaps.  */
2687 for (i = 0; i < ira_allocno_classes_num; i++)
2689 aclass = ira_allocno_classes[i];
2690 bitmap_clear (&dead_points[aclass]);
2696 /* Set up minimal and maximal live range points for allocnos. */
2697 static void
2698 setup_min_max_allocno_live_range_point (void)
2700 int i;
2701 ira_allocno_t a, parent_a, cap;
2702 ira_allocno_iterator ai;
2703 #ifdef ENABLE_IRA_CHECKING
2704 ira_object_iterator oi;
2705 ira_object_t obj;
2706 #endif
2707 live_range_t r;
2708 ira_loop_tree_node_t parent;
/* Per object: MAX = finish of the head range, MIN = start of the tail
   range.  This assumes the range list is ordered with later ranges
   first -- TODO confirm the invariant elsewhere in this file.  */
2710 FOR_EACH_ALLOCNO (a, ai)
2712 int n = ALLOCNO_NUM_OBJECTS (a);
2714 for (i = 0; i < n; i++)
2716 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2717 r = OBJECT_LIVE_RANGES (obj);
2718 if (r == NULL)
2719 continue;
2720 OBJECT_MAX (obj) = r->finish;
2721 for (; r->next != NULL; r = r->next)
2723 OBJECT_MIN (obj) = r->start;
/* Propagate each object's [MIN, MAX] up the loop tree: into the cap
   chain for capped allocnos, or into the parent region's allocno for
   the same regno.  */
2726 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2727 for (a = ira_regno_allocno_map[i];
2728 a != NULL;
2729 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2731 int j;
2732 int n = ALLOCNO_NUM_OBJECTS (a);
2734 for (j = 0; j < n; j++)
2736 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2737 ira_object_t parent_obj;
/* Negative MAX means the object has no live ranges.  */
2739 if (OBJECT_MAX (obj) < 0)
2740 continue;
2741 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2742 /* Accumulation of range info. */
2743 if (ALLOCNO_CAP (a) != NULL)
2745 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2747 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2748 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2749 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2750 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2751 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2753 continue;
2755 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2756 continue;
2757 parent_a = parent->regno_allocno_map[i];
2758 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2759 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2760 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2761 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2762 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
/* Checking: every object's MIN/MAX must lie within the program point
   range.  */
2765 #ifdef ENABLE_IRA_CHECKING
2766 FOR_EACH_OBJECT (obj, oi)
2768 if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
2769 && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
2770 continue;
2771 gcc_unreachable ();
2773 #endif
2776 /* Sort allocnos according to their live ranges. Allocnos with
2777 smaller allocno class are put first unless we use priority
2778 coloring. Allocnos with the same class are ordered according
2779 their start (min). Allocnos with the same start are ordered
2780 according their finish (max). */
2781 static int
2782 object_range_compare_func (const void *v1p, const void *v2p)
2784 int diff;
2785 ira_object_t obj1 = *(const ira_object_t *) v1p;
2786 ira_object_t obj2 = *(const ira_object_t *) v2p;
2787 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2788 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2790 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2791 return diff;
2792 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2793 return diff;
2794 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2797 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2798 static void
2799 sort_conflict_id_map (void)
2801 int i, num;
2802 ira_allocno_t a;
2803 ira_allocno_iterator ai;
2805 num = 0;
/* Repack the map: objects of live allocnos first, in allocno
   iteration order.  */
2806 FOR_EACH_ALLOCNO (a, ai)
2808 ira_allocno_object_iterator oi;
2809 ira_object_t obj;
2811 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2812 ira_object_id_map[num++] = obj;
/* Order by live-range start/finish (see object_range_compare_func).  */
2814 if (num > 1)
2815 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2816 object_range_compare_func);
/* Conflict ids follow the sorted order ...  */
2817 for (i = 0; i < num; i++)
2819 ira_object_t obj = ira_object_id_map[i];
2821 gcc_assert (obj != NULL);
2822 OBJECT_CONFLICT_ID (obj) = i;
/* ... and unused trailing slots are cleared.  */
2824 for (i = num; i < ira_objects_num; i++)
2825 ira_object_id_map[i] = NULL;
2828 /* Set up minimal and maximal conflict ids of allocnos with which
2829 given allocno can conflict. */
2830 static void
2831 setup_min_max_conflict_allocno_ids (void)
2833 int aclass;
2834 int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2835 int *live_range_min, *last_lived;
2836 int word0_min, word0_max;
2837 ira_allocno_t a;
2838 ira_allocno_iterator ai;
/* Forward sweep over objects in conflict-id order: compute, for each
   object, the smallest conflict id it can possibly conflict with.  */
2840 live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2841 aclass = -1;
2842 first_not_finished = -1;
2843 for (i = 0; i < ira_objects_num; i++)
2845 ira_object_t obj = ira_object_id_map[i];
2847 if (obj == NULL)
2848 continue;
2850 a = OBJECT_ALLOCNO (obj);
2852 if (aclass < 0)
/* First object seen: start a fresh window.  */
2854 aclass = ALLOCNO_CLASS (a);
2855 min = i;
2856 first_not_finished = i;
2858 else
2860 start = OBJECT_MIN (obj);
2861 /* If we skip an allocno, the allocno with smaller ids will
2862 be also skipped because of the secondary sorting the
2863 range finishes (see function
2864 object_range_compare_func). */
2865 while (first_not_finished < i
2866 && start > OBJECT_MAX (ira_object_id_map
2867 [first_not_finished]))
2868 first_not_finished++;
2869 min = first_not_finished;
2871 if (min == i)
2872 /* We could increase min further in this case but it is good
2873 enough. */
2874 min++;
/* Save the real range start before OBJECT_MIN is repurposed to hold
   the minimal conflict id.  */
2875 live_range_min[i] = OBJECT_MIN (obj);
2876 OBJECT_MIN (obj) = min;
/* Backward sweep: compute the maximal conflict id via a "last lived"
   table indexed by program point.  */
2878 last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2879 aclass = -1;
2880 filled_area_start = -1;
2881 for (i = ira_objects_num - 1; i >= 0; i--)
2883 ira_object_t obj = ira_object_id_map[i];
2885 if (obj == NULL)
2886 continue;
2888 a = OBJECT_ALLOCNO (obj);
2889 if (aclass < 0)
2891 aclass = ALLOCNO_CLASS (a);
2892 for (j = 0; j < ira_max_point; j++)
2893 last_lived[j] = -1;
2894 filled_area_start = ira_max_point;
2896 min = live_range_min[i];
2897 finish = OBJECT_MAX (obj);
2898 max = last_lived[finish];
2899 if (max < 0)
2900 /* We could decrease max further in this case but it is good
2901 enough. */
2902 max = OBJECT_CONFLICT_ID (obj) - 1;
/* OBJECT_MAX is likewise repurposed to hold the maximal conflict
   id.  */
2903 OBJECT_MAX (obj) = max;
2904 /* In filling, we can go further A range finish to recognize
2905 intersection quickly because if the finish of subsequently
2906 processed allocno (it has smaller conflict id) range is
2907 further A range finish than they are definitely intersected
2908 (the reason for this is the allocnos with bigger conflict id
2909 have their range starts not smaller than allocnos with
2910 smaller ids. */
2911 for (j = min; j < filled_area_start; j++)
2912 last_lived[j] = i;
2913 filled_area_start = min;
2915 ira_free (last_lived);
2916 ira_free (live_range_min);
2918 /* For allocnos with more than one object, we may later record extra conflicts in
2919 subobject 0 that we cannot really know about here.
2920 For now, simply widen the min/max range of these subobjects. */
2922 word0_min = INT_MAX;
2923 word0_max = INT_MIN;
/* First find the overall id window of all word-0 subobjects ...  */
2925 FOR_EACH_ALLOCNO (a, ai)
2927 int n = ALLOCNO_NUM_OBJECTS (a);
2928 ira_object_t obj0;
2930 if (n < 2)
2931 continue;
2932 obj0 = ALLOCNO_OBJECT (a, 0);
2933 if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2934 word0_min = OBJECT_CONFLICT_ID (obj0);
2935 if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2936 word0_max = OBJECT_CONFLICT_ID (obj0);
/* ... then widen each word-0 subobject's window to cover it.  */
2938 FOR_EACH_ALLOCNO (a, ai)
2940 int n = ALLOCNO_NUM_OBJECTS (a);
2941 ira_object_t obj0;
2943 if (n < 2)
2944 continue;
2945 obj0 = ALLOCNO_OBJECT (a, 0);
2946 if (OBJECT_MIN (obj0) > word0_min)
2947 OBJECT_MIN (obj0) = word0_min;
2948 if (OBJECT_MAX (obj0) < word0_max)
2949 OBJECT_MAX (obj0) = word0_max;
/* Create cap allocnos at upper loop tree levels: a cap allocno (one
   with a cap member) is itself capped one level up; an uncapped
   allocno is capped only if it does not live on its region's border
   (see the border_allocnos test below).  */
2955 static void
2956 create_caps (void)
2958 ira_allocno_t a;
2959 ira_allocno_iterator ai;
2960 ira_loop_tree_node_t loop_tree_node;
2962 FOR_EACH_ALLOCNO (a, ai)
/* Allocnos of the root region need no cap.  */
2964 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2965 continue;
2966 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2967 create_cap_allocno (a);
2968 else if (ALLOCNO_CAP (a) == NULL)
2970 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2971 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2972 create_cap_allocno (a);
/* The page contains code transforming a more-than-one-region internal
   representation (IR) into a one-region IR, which is necessary for
   reload.  This transformation is called IR flattening.  We might
   just rebuild the IR for one region, but we don't do it because it
   takes a lot of time.  */
/* Map: regno -> the allocno which will finally represent the regno in
   the one-region IR.  */
2987 static ira_allocno_t *regno_top_level_allocno_map;
2989 /* Find the allocno that corresponds to A at a level one higher up in the
2990 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2991 ira_allocno_t
2992 ira_parent_allocno (ira_allocno_t a)
2994 ira_loop_tree_node_t parent;
2996 if (ALLOCNO_CAP (a) != NULL)
2997 return NULL;
2999 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3000 if (parent == NULL)
3001 return NULL;
3003 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3006 /* Find the allocno that corresponds to A at a level one higher up in the
3007 loop tree. If ALLOCNO_CAP is set for A, return that. */
3008 ira_allocno_t
3009 ira_parent_or_cap_allocno (ira_allocno_t a)
3011 if (ALLOCNO_CAP (a) != NULL)
3012 return ALLOCNO_CAP (a);
3014 return ira_parent_allocno (a);
3017 /* Process all allocnos originated from pseudo REGNO and copy live
3018 ranges, hard reg conflicts, and allocno stack reg attributes from
3019 low level allocnos to final allocnos which are destinations of
3020 removed stores at a loop exit. Return true if we copied live
3021 ranges. */
3022 static bool
3023 copy_info_to_removed_store_destinations (int regno)
3025 ira_allocno_t a;
3026 ira_allocno_t parent_a = NULL;
3027 ira_loop_tree_node_t parent;
3028 bool merged_p;
3030 merged_p = false;
3031 for (a = ira_regno_allocno_map[regno];
3032 a != NULL;
3033 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
/* Only allocnos that survive flattening are sources.  */
3035 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
3036 /* This allocno will be removed. */
3037 continue;
3039 /* Caps will be removed. */
3040 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
/* Find the nearest ancestor whose allocno both survives flattening
   and is a destination of a removed memory store.  */
3041 for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3042 parent != NULL;
3043 parent = parent->parent)
3044 if ((parent_a = parent->regno_allocno_map[regno]) == NULL
3045 || (parent_a
3046 == regno_top_level_allocno_map[REGNO
3047 (allocno_emit_reg (parent_a))]
3048 && ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
3049 break;
3050 if (parent == NULL || parent_a == NULL)
3051 continue;
/* Copy (not move) the ranges and union the conflicts into the
   destination ...  */
3053 copy_allocno_live_ranges (a, parent_a);
3054 merge_hard_reg_conflicts (a, parent_a, true);
/* ... and accumulate the call-related statistics.  */
3056 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
3057 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3058 += ALLOCNO_CALLS_CROSSED_NUM (a);
3059 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3060 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3061 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
3062 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
3063 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3064 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3065 merged_p = true;
3067 return merged_p;
3070 /* Flatten the IR. In other words, this function transforms IR as if
3071 it were built with one region (without loops). We could make it
3072 much simpler by rebuilding IR with one region, but unfortunately it
3073 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
3074 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
3075 IRA_MAX_POINT before emitting insns on the loop borders. */
3076 void
3077 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
3079 int i, j;
3080 bool keep_p;
3081 int hard_regs_num;
3082 bool new_pseudos_p, merged_p, mem_dest_p;
3083 unsigned int n;
3084 enum reg_class aclass;
3085 ira_allocno_t a, parent_a, first, second, node_first, node_second;
3086 ira_copy_t cp;
3087 ira_loop_tree_node_t node;
3088 live_range_t r;
3089 ira_allocno_iterator ai;
3090 ira_copy_iterator ci;
3092 regno_top_level_allocno_map
3093 = (ira_allocno_t *) ira_allocate (max_reg_num ()
3094 * sizeof (ira_allocno_t));
3095 memset (regno_top_level_allocno_map, 0,
3096 max_reg_num () * sizeof (ira_allocno_t));
3097 new_pseudos_p = merged_p = false;
3098 FOR_EACH_ALLOCNO (a, ai)
3100 ira_allocno_object_iterator oi;
3101 ira_object_t obj;
3103 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3104 /* Caps are not in the regno allocno maps and they are never
3105 will be transformed into allocnos existing after IR
3106 flattening. */
3107 continue;
3108 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3109 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
3110 OBJECT_CONFLICT_HARD_REGS (obj));
3111 #ifdef STACK_REGS
3112 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
3113 #endif
3115 /* Fix final allocno attributes. */
3116 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
3118 mem_dest_p = false;
3119 for (a = ira_regno_allocno_map[i];
3120 a != NULL;
3121 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3123 ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
3125 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3126 if (data->somewhere_renamed_p)
3127 new_pseudos_p = true;
3128 parent_a = ira_parent_allocno (a);
3129 if (parent_a == NULL)
3131 ALLOCNO_COPIES (a) = NULL;
3132 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3133 continue;
3135 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
3137 if (data->mem_optimized_dest != NULL)
3138 mem_dest_p = true;
3139 parent_data = ALLOCNO_EMIT_DATA (parent_a);
3140 if (REGNO (data->reg) == REGNO (parent_data->reg))
3142 merge_hard_reg_conflicts (a, parent_a, true);
3143 move_allocno_live_ranges (a, parent_a);
3144 merged_p = true;
3145 parent_data->mem_optimized_dest_p
3146 = (parent_data->mem_optimized_dest_p
3147 || data->mem_optimized_dest_p);
3148 continue;
3150 new_pseudos_p = true;
3151 for (;;)
3153 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
3154 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
3155 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
3156 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3157 -= ALLOCNO_CALLS_CROSSED_NUM (a);
3158 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3159 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3160 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3161 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3162 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
3163 && ALLOCNO_NREFS (parent_a) >= 0
3164 && ALLOCNO_FREQ (parent_a) >= 0);
3165 aclass = ALLOCNO_CLASS (parent_a);
3166 hard_regs_num = ira_class_hard_regs_num[aclass];
3167 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
3168 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
3169 for (j = 0; j < hard_regs_num; j++)
3170 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
3171 -= ALLOCNO_HARD_REG_COSTS (a)[j];
3172 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
3173 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
3174 for (j = 0; j < hard_regs_num; j++)
3175 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
3176 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
3177 ALLOCNO_CLASS_COST (parent_a)
3178 -= ALLOCNO_CLASS_COST (a);
3179 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
3180 parent_a = ira_parent_allocno (parent_a);
3181 if (parent_a == NULL)
3182 break;
3184 ALLOCNO_COPIES (a) = NULL;
3185 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3187 if (mem_dest_p && copy_info_to_removed_store_destinations (i))
3188 merged_p = true;
3190 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
3191 if (merged_p || ira_max_point_before_emit != ira_max_point)
3192 ira_rebuild_start_finish_chains ();
3193 if (new_pseudos_p)
3195 sparseset objects_live;
3197 /* Rebuild conflicts. */
3198 FOR_EACH_ALLOCNO (a, ai)
3200 ira_allocno_object_iterator oi;
3201 ira_object_t obj;
3203 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3204 || ALLOCNO_CAP_MEMBER (a) != NULL)
3205 continue;
3206 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3208 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3209 ira_assert (r->object == obj);
3210 clear_conflicts (obj);
3213 objects_live = sparseset_alloc (ira_objects_num);
3214 for (i = 0; i < ira_max_point; i++)
3216 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
3218 ira_object_t obj = r->object;
3220 a = OBJECT_ALLOCNO (obj);
3221 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3222 || ALLOCNO_CAP_MEMBER (a) != NULL)
3223 continue;
3225 aclass = ALLOCNO_CLASS (a);
3226 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
3228 ira_object_t live_obj = ira_object_id_map[n];
3229 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
3230 enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
3232 if (ira_reg_classes_intersect_p[aclass][live_aclass]
3233 /* Don't set up conflict for the allocno with itself. */
3234 && live_a != a)
3235 ira_add_conflict (obj, live_obj);
3237 sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
3240 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
3241 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
3243 sparseset_free (objects_live);
3244 compress_conflict_vecs ();
3246 /* Mark some copies for removing and change allocnos in the rest
3247 copies. */
3248 FOR_EACH_COPY (cp, ci)
3250 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
3251 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
3253 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3254 fprintf
3255 (ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
3256 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
3257 ALLOCNO_NUM (cp->first),
3258 REGNO (allocno_emit_reg (cp->first)),
3259 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
3260 ALLOCNO_NUM (cp->second),
3261 REGNO (allocno_emit_reg (cp->second)));
3262 cp->loop_tree_node = NULL;
3263 continue;
3265 first
3266 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
3267 second
3268 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
3269 node = cp->loop_tree_node;
3270 if (node == NULL)
3271 keep_p = true; /* It copy generated in ira-emit.c. */
3272 else
3274 /* Check that the copy was not propagated from level on
3275 which we will have different pseudos. */
3276 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
3277 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
3278 keep_p = ((REGNO (allocno_emit_reg (first))
3279 == REGNO (allocno_emit_reg (node_first)))
3280 && (REGNO (allocno_emit_reg (second))
3281 == REGNO (allocno_emit_reg (node_second))));
3283 if (keep_p)
3285 cp->loop_tree_node = ira_loop_tree_root;
3286 cp->first = first;
3287 cp->second = second;
3289 else
3291 cp->loop_tree_node = NULL;
3292 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3293 fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
3294 cp->num, ALLOCNO_NUM (cp->first),
3295 REGNO (allocno_emit_reg (cp->first)),
3296 ALLOCNO_NUM (cp->second),
3297 REGNO (allocno_emit_reg (cp->second)));
3300 /* Remove unnecessary allocnos on lower levels of the loop tree. */
3301 FOR_EACH_ALLOCNO (a, ai)
3303 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3304 || ALLOCNO_CAP_MEMBER (a) != NULL)
3306 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3307 fprintf (ira_dump_file, " Remove a%dr%d\n",
3308 ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
3309 ira_remove_allocno_prefs (a);
3310 finish_allocno (a);
3311 continue;
3313 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
3314 ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
3315 ALLOCNO_CAP (a) = NULL;
3316 /* Restore updated costs for assignments from reload. */
3317 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3318 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
3319 if (! ALLOCNO_ASSIGNED_P (a))
3320 ira_free_allocno_updated_costs (a);
3321 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3322 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3324 /* Remove unnecessary copies. */
3325 FOR_EACH_COPY (cp, ci)
3327 if (cp->loop_tree_node == NULL)
3329 ira_copies[cp->num] = NULL;
3330 finish_copy (cp);
3331 continue;
3333 ira_assert
3334 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
3335 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
3336 add_allocno_copy_to_list (cp);
3337 swap_allocno_copy_ends_if_necessary (cp);
3339 rebuild_regno_allocno_maps ();
3340 if (ira_max_point != ira_max_point_before_emit)
3341 ira_compress_allocno_live_ranges ();
3342 ira_free (regno_top_level_allocno_map);
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      node = ALLOCNO_LOOP_TREE_NODE (a);
      /* Every allocno must be registered in its own node's set.  */
      ira_assert (bitmap_bit_p (node->all_allocnos, ALLOCNO_NUM (a)));
      if (node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  /* A cap member below the root must have a cap above it.  */
	  ira_assert (ALLOCNO_CAP (a) != NULL);
	}
      else if (ALLOCNO_CAP (a) == NULL)
	{
	  /* No cap at all: the parent node must carry an allocno for
	     the same regno and this allocno must sit on the node's
	     border.  */
	  ira_assert (node->parent->regno_allocno_map[ALLOCNO_REGNO (a)]
		      != NULL
		      && bitmap_bit_p (node->border_allocnos,
				       ALLOCNO_NUM (a)));
	}
    }
}
#endif
3375 /* Identify allocnos which prefer a register class with a single hard register.
3376 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3377 less likely to use the preferred singleton register. */
3378 static void
3379 update_conflict_hard_reg_costs (void)
3381 ira_allocno_t a;
3382 ira_allocno_iterator ai;
3383 int i, index, min;
3385 FOR_EACH_ALLOCNO (a, ai)
3387 reg_class_t aclass = ALLOCNO_CLASS (a);
3388 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3389 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3390 if (singleton < 0)
3391 continue;
3392 index = ira_class_hard_reg_index[(int) aclass][singleton];
3393 if (index < 0)
3394 continue;
3395 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3396 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3397 continue;
3398 min = INT_MAX;
3399 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3400 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3401 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3402 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3403 if (min == INT_MAX)
3404 continue;
3405 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3406 aclass, 0);
3407 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3408 -= min - ALLOCNO_CLASS_COST (a);
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing all function and the basic
   blocks) for regional allocation.  A true return means that we
   really need to flatten IR before the reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  /* Set up the pools/vectors the rest of the build phase fills in.  */
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  /* First pass: drop loops not worth regional allocation (FALSE means
     keep the profitable ones).  */
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: push allocno info up the loop tree and
	 build caps for allocnos living through subloops.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      /* Gather summary statistics for the dump: total conflicts (n),
	 total live ranges (nr) and multi-object allocnos (nr_big).  */
      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  /* Tear down in roughly the reverse order of ira_build's setup; the
     allocno live ranges are freed last, after the allocnos that
     reference them.  */
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}