* c-ubsan.c (ubsan_instrument_shift): Use type0.
[official-gcc.git] / gcc / ira-build.c
blob8b6b9564fcb1e86bfb3f3c95aba910682d8173ed
1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "target.h"
28 #include "regs.h"
29 #include "flags.h"
30 #include "hard-reg-set.h"
31 #include "predict.h"
32 #include "vec.h"
33 #include "hashtab.h"
34 #include "hash-set.h"
35 #include "machmode.h"
36 #include "input.h"
37 #include "function.h"
38 #include "dominance.h"
39 #include "cfg.h"
40 #include "basic-block.h"
41 #include "insn-config.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "params.h"
45 #include "df.h"
46 #include "reload.h"
47 #include "sparseset.h"
48 #include "ira-int.h"
49 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
                                     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We can not use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  The regno -> allocno map is sized for the
   current max_reg_num and zero-filled.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds to the all function) will not be
   allocated, but nodes will still be allocated for basic blocks.
   Loops whose header or exit has a critical abnormal edge are skipped
   (left uninitialized) because we can not emit insns on such edges.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
                     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
              sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loop info: a single region for the whole function.  */
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
                        ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
                    ira_allocate (sizeof (struct ira_loop_tree_node)
                                  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
        {
          ira_loop_nodes[i].regno_allocno_map = NULL;
          skip_p = false;
          /* Skip loops entered by a critical abnormal edge (other
             than the latch back edge).  */
          FOR_EACH_EDGE (e, ei, loop->header->preds)
            if (e->src != loop->latch
                && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
          /* Likewise for critical abnormal exit edges.  */
          edges = get_loop_exit_edges (loop);
          FOR_EACH_VEC_ELT (edges, j, e)
            if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          edges.release ();
          if (skip_p)
            continue;
        }
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
/* The function returns TRUE if there is more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      /* A non-NULL regno_allocno_map marks a loop that forms its own
         region; any such loop besides the root means >1 region.  */
      if (ira_loop_nodes[i].regno_allocno_map != NULL
          && ira_loop_tree_root != &ira_loop_nodes[i])
        return true;
  return false;
}
/* Free the loop tree node of a loop.  A NULL regno_allocno_map means
   the node was never initialized (skipped loop) and owns nothing.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}
/* Free the loop tree nodes.  BB nodes are only freed up to the block
   count recorded before live-range splitting added new blocks.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
        ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
        ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we are
   adding the loop designating the whole function when CFG loops are
   not built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We can not use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
        parent = NULL;
      else
        {
          /* Find the nearest enclosing loop that forms a region
             (i.e. was not skipped at node creation).  */
          for (parent = loop_outer (loop);
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      if (parent == NULL)
        {
          loop_node->next = NULL;
          loop_node->subloop_next = NULL;
          loop_node->parent = NULL;
        }
      else
        {
          /* Link into both the children and the subloops chains of
             the parent node.  */
          parent_node = &ira_loop_nodes[parent->num];
          loop_node->next = parent_node->children;
          parent_node->children = loop_node;
          loop_node->subloop_next = parent_node->subloops;
          parent_node->subloops = loop_node;
          loop_node->parent = parent_node;
        }
    }
}
/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns maximal value of level in the tree + 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
        max_height = height;
    }
  return max_height;
}
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We can not use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
        parent = NULL;
      else
        {
          /* Nearest enclosing loop that forms a region.  */
          for (parent = bb->loop_father;
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  The maps are reallocated because max_reg_num may have
   grown, then refilled from the existing allocnos.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
        ira_free (ira_loop_nodes[l].regno_allocno_map);
        ira_loop_nodes[l].regno_allocno_map
          = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                            * max_regno);
        memset (ira_loop_nodes[l].regno_allocno_map, 0,
                sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps.  */
        continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle.  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
}
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;
/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  live_range_pool
    = create_alloc_pool ("live ranges",
                         sizeof (struct live_range), 100);
  allocno_pool
    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
  object_pool
    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the word index for multi-object allocnos.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  /* Hard regs outside the allocno class always conflict.  */
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
                    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle on region borders (see
           ira-emit.c).  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
/* Set up register class for A and update its conflict hard
   registers.  Hard registers outside ACLASS are added to the
   conflict sets of every object of A.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                              reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                              reg_class_contents[aclass]);
    }
}
/* Determine the number of objects we should associate with allocno A
   and allocate them.  Only a two-word allocno needing exactly two
   registers gets two objects; everything else gets one.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
/* Merge hard register conflict information for all objects associated with
   allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
                          bool total_only)
{
  int i;

  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
        IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
                          OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
                        OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
/* Update hard register conflict information for all objects associated with
   A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}
/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nw;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such case because it does not result
       in allocation.  */
    return false;

  /* Number of words the bit vector would need.  */
  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
          < 3 * nw * sizeof (IRA_INT_TYPE));
}
/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  The vector is NULL-terminated.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}
/* Allocate and initialize the conflict bit vector of OBJ.  The vector
   covers conflict ids in [OBJECT_MIN, OBJECT_MAX].  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
          / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
/* Allocate and initialize the conflict vector or conflict bit vector
   of OBJ for NUM conflicting allocnos, whichever is more profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}
715 /* Add OBJ2 to the conflicts of OBJ1. */
716 static void
717 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
719 int num;
720 unsigned int size;
722 if (OBJECT_CONFLICT_VEC_P (obj1))
724 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
725 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
726 num = curr_num + 2;
727 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
729 ira_object_t *newvec;
730 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
731 newvec = (ira_object_t *) ira_allocate (size);
732 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
733 ira_free (vec);
734 vec = newvec;
735 OBJECT_CONFLICT_ARRAY (obj1) = vec;
736 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
738 vec[num - 2] = obj2;
739 vec[num - 1] = NULL;
740 OBJECT_NUM_CONFLICTS (obj1)++;
742 else
744 int nw, added_head_nw, id;
745 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
747 id = OBJECT_CONFLICT_ID (obj2);
748 if (OBJECT_MIN (obj1) > id)
750 /* Expand head of the bit vector. */
751 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
752 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
753 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
754 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
756 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
757 vec, nw * sizeof (IRA_INT_TYPE));
758 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
760 else
762 size
763 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
764 vec = (IRA_INT_TYPE *) ira_allocate (size);
765 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
766 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
767 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
768 memset ((char *) vec
769 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
770 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
771 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
772 OBJECT_CONFLICT_ARRAY (obj1) = vec;
773 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
775 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
777 else if (OBJECT_MAX (obj1) < id)
779 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
780 size = nw * sizeof (IRA_INT_TYPE);
781 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
783 /* Expand tail of the bit vector. */
784 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
785 vec = (IRA_INT_TYPE *) ira_allocate (size);
786 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
787 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
788 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
789 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
790 OBJECT_CONFLICT_ARRAY (obj1) = vec;
791 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
793 OBJECT_MAX (obj1) = id;
795 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
/* Clear all conflicts of OBJ.  Handles both the vector and the bit
   vector representation.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
/* Remove duplications in conflict vector of OBJ.  Uses the tick
   marking scheme in conflict_check to detect repeats in one pass.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
        {
          conflict_check[id] = curr_conflict_check_tick;
          vec[j++] = conflict_obj;
        }
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}
/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    {
      if (OBJECT_CONFLICT_VEC_P (obj))
        compress_conflict_vec (obj);
    }
  ira_free (conflict_check);
}
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
   parent loop.  The cap inherits A's mode, class, costs, frequencies
   and hard register conflicts.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
                    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
/* Create and return a live range for OBJ with given attributes
   START, FINISH and successor NEXT.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
                       live_range_t next)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}
/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;

  p = ira_create_live_range (object, start, finish,
                             OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}
/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  *p = *r;
  return p;
}
/* Copy allocno live range list given by its head R and return the
   result.  The copy preserves the order of the ranges.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
        first = p;
      else
        last->next = p;
      last = p;
    }
  return first;
}
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  Ranges from both lists are consumed destructively;
   overlapping or adjacent ranges are coalesced and the duplicate is
   freed.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      /* Keep the invariant that R1 is the range starting later.  */
      if (r1->start < r2->start)
        std::swap (r1, r2);
      if (r1->start <= r2->finish + 1)
        {
          /* Intersected ranges: merge r1 and r2 into r1.  */
          r1->start = r2->start;
          if (r1->finish < r2->finish)
            r1->finish = r2->finish;
          live_range_t temp = r2;
          r2 = r2->next;
          ira_finish_live_range (temp);
          if (r2 == NULL)
            {
              /* To try to merge with subsequent ranges in r1.  */
              r2 = r1->next;
              r1->next = NULL;
            }
        }
      else
        {
          /* Add r1 to the result.  */
          if (first == NULL)
            first = last = r1;
          else
            {
              last->next = r1;
              last = r1;
            }
          r1 = r1->next;
          if (r1 == NULL)
            {
              /* To try to merge with subsequent ranges in r2.  */
              r1 = r2->next;
              r2->next = NULL;
            }
        }
    }
  /* Append whichever tail list survived.  */
  if (r1 != NULL)
    {
      if (first == NULL)
        first = r1;
      else
        last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
        first = r2;
      else
        last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
1071 /* Return TRUE if live ranges R1 and R2 intersect. */
1072 bool
1073 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1075 /* Remember the live ranges are always kept ordered. */
1076 while (r1 != NULL && r2 != NULL)
1078 if (r1->start > r2->finish)
1079 r1 = r1->next;
1080 else if (r2->start > r1->finish)
1081 r2 = r2->next;
1082 else
1083 return true;
1085 return false;
/* Free allocno live range R.  Returns the node to the live range
   allocation pool; R must not be used afterwards.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
1095 /* Free list of allocno live ranges starting with R. */
1096 void
1097 ira_finish_live_range_list (live_range_t r)
1099 live_range_t next_r;
1101 for (; r != NULL; r = next_r)
1103 next_r = r->next;
1104 ira_finish_live_range (r);
1108 /* Free updated register costs of allocno A. */
1109 void
1110 ira_free_allocno_updated_costs (ira_allocno_t a)
1112 enum reg_class aclass;
1114 aclass = ALLOCNO_CLASS (a);
1115 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1116 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1117 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1118 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1119 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1120 aclass);
1121 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  Also releases A's objects (their live ranges and conflict
   arrays) and clears the global id maps that referenced them.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  /* Tear down each object belonging to A before touching A's own
     cost vectors.  */
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A: its cost vectors, its
   objects, and finally the allocno structure itself.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}
1166 /* Free the memory allocated for all allocnos. */
1167 static void
1168 finish_allocnos (void)
1170 ira_allocno_t a;
1171 ira_allocno_iterator ai;
1173 FOR_EACH_ALLOCNO (a, ai)
1174 finish_allocno (a);
1175 ira_free (ira_regno_allocno_map);
1176 ira_object_id_map_vec.release ();
1177 allocno_vec.release ();
1178 free_alloc_pool (allocno_pool);
1179 free_alloc_pool (object_pool);
1180 free_alloc_pool (live_range_pool);
1185 /* Pools for allocno preferences. */
1186 static alloc_pool pref_pool;
1188 /* Vec containing references to all created preferences. It is a
1189 container of array ira_prefs. */
1190 static vec<ira_pref_t> pref_vec;
/* The function initializes data concerning allocno prefs: the pref
   allocation pool and the vector/array mirroring all created prefs.  */
static void
initiate_prefs (void)
{
  pref_pool
    = create_alloc_pool ("prefs", sizeof (struct ira_allocno_pref), 100);
  /* get_max_uid () is just an initial size hint for the vector.  */
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}
1203 /* Return pref for A and HARD_REGNO if any. */
1204 static ira_pref_t
1205 find_allocno_pref (ira_allocno_t a, int hard_regno)
1207 ira_pref_t pref;
1209 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1210 if (pref->allocno == a && pref->hard_regno == hard_regno)
1211 return pref;
1212 return NULL;
1215 /* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1216 ira_pref_t
1217 ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
1219 ira_pref_t pref;
1221 pref = (ira_pref_t) pool_alloc (pref_pool);
1222 pref->num = ira_prefs_num;
1223 pref->allocno = a;
1224 pref->hard_regno = hard_regno;
1225 pref->freq = freq;
1226 pref_vec.safe_push (pref);
1227 ira_prefs = pref_vec.address ();
1228 ira_prefs_num = pref_vec.length ();
1229 return pref;
1232 /* Attach a pref PREF to the corresponding allocno. */
1233 static void
1234 add_allocno_pref_to_list (ira_pref_t pref)
1236 ira_allocno_t a = pref->allocno;
1238 pref->next_pref = ALLOCNO_PREFS (a);
1239 ALLOCNO_PREFS (a) = pref;
1242 /* Create (or update frequency if the pref already exists) the pref of
1243 allocnos A preferring HARD_REGNO with frequency FREQ. */
1244 void
1245 ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
1247 ira_pref_t pref;
1249 if (freq <= 0)
1250 return;
1251 if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
1253 pref->freq += freq;
1254 return;
1256 pref = ira_create_pref (a, hard_regno, freq);
1257 ira_assert (a != NULL);
1258 add_allocno_pref_to_list (pref);
/* Print info about PREF into file F, as
   "prefN:aN(rN)<-hrN@freq" (allocno, its regno, preferred hard
   register, frequency).  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
	   ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
	   pref->hard_regno, pref->freq);
}
/* Print info about PREF into stderr.  Debugger convenience wrapper.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}
/* Print info about all prefs recorded in the global pref array into
   file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}
/* Print info about all prefs into stderr.  Debugger convenience
   wrapper.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}
1295 /* Print info about prefs involving allocno A into file F. */
1296 static void
1297 print_allocno_prefs (FILE *f, ira_allocno_t a)
1299 ira_pref_t pref;
1301 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1302 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1303 fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
1304 fprintf (f, "\n");
/* Print info about prefs involving allocno A into stderr.  Debugger
   convenience wrapper.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}
/* The function frees memory allocated for PREF and clears its slot
   in the global pref array.  PREF must already be unlinked from its
   allocno's pref list.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pool_free (pref_pool, pref);
}
/* Remove PREF from the list of allocno prefs and free memory for
   it.  PREF must actually be on its allocno's list (asserted).  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
	     pref->num, pref->hard_regno, pref->freq);
  /* Locate PREF on its allocno's singly-linked list, remembering the
     predecessor for the unlink below.  */
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}
1345 /* Remove all prefs of allocno A. */
1346 void
1347 ira_remove_allocno_prefs (ira_allocno_t a)
1349 ira_pref_t pref, next_pref;
1351 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
1353 next_pref = pref->next_pref;
1354 finish_pref (pref);
1356 ALLOCNO_PREFS (a) = NULL;
/* Free memory allocated for all prefs, then release the pref vector
   and allocation pool.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  free_alloc_pool (pref_pool);
}
1374 /* Pools for copies. */
1375 static alloc_pool copy_pool;
1377 /* Vec containing references to all created copies. It is a
1378 container of array ira_copies. */
1379 static vec<ira_copy_t> copy_vec;
/* The function initializes data concerning allocno copies: the copy
   allocation pool and the vector/array mirroring all created copies.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  /* get_max_uid () is just an initial size hint for the vector.  */
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any, otherwise NULL.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  /* A copy is threaded onto the lists of both of its allocnos, so the
     successor pointer to follow depends on whether A1 is the copy's
     first or second end.  */
  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}
1422 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1423 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1424 ira_copy_t
1425 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1426 bool constraint_p, rtx_insn *insn,
1427 ira_loop_tree_node_t loop_tree_node)
1429 ira_copy_t cp;
1431 cp = (ira_copy_t) pool_alloc (copy_pool);
1432 cp->num = ira_copies_num;
1433 cp->first = first;
1434 cp->second = second;
1435 cp->freq = freq;
1436 cp->constraint_p = constraint_p;
1437 cp->insn = insn;
1438 cp->loop_tree_node = loop_tree_node;
1439 copy_vec.safe_push (cp);
1440 ira_copies = copy_vec.address ();
1441 ira_copies_num = copy_vec.length ();
1442 return cp;
/* Attach a copy CP to the heads of the copy lists of both allocnos
   involved in the copy.  Each copy is threaded onto two doubly-linked
   lists at once (one per end), so the back-pointer of the old head
   that must be updated depends on which end of the old head matches.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1473 /* Make a copy CP a canonical copy where number of the
1474 first allocno is less than the second one. */
1475 static void
1476 swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1478 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1479 return;
1481 std::swap (cp->first, cp->second);
1482 std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
1483 std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
1486 /* Create (or update frequency if the copy already exists) and return
1487 the copy of allocnos FIRST and SECOND with frequency FREQ
1488 corresponding to move insn INSN (if any) and originated from
1489 LOOP_TREE_NODE. */
1490 ira_copy_t
1491 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1492 bool constraint_p, rtx_insn *insn,
1493 ira_loop_tree_node_t loop_tree_node)
1495 ira_copy_t cp;
1497 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1499 cp->freq += freq;
1500 return cp;
1502 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1503 loop_tree_node);
1504 ira_assert (first != NULL && second != NULL);
1505 add_allocno_copy_to_list (cp);
1506 swap_allocno_copy_ends_if_necessary (cp);
1507 return cp;
/* Print info about copy CP into file F.  The trailing tag says how
   the copy arose: from a move insn, from an insn constraint, or from
   a register shuffle.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
/* Print the copy REF to stderr; overload for use from a debugger.  */
DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}
1527 DEBUG_FUNCTION void
1528 debug (ira_allocno_copy *ptr)
1530 if (ptr)
1531 debug (*ptr);
1532 else
1533 fprintf (stderr, "<nil>\n");
/* Print info about copy CP into stderr.  Debugger convenience
   wrapper.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
/* Print info about all copies recorded in the global copy array into
   file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
/* Print info about all copies into stderr.  Debugger convenience
   wrapper.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
/* Print info about copies involving allocno A into file F.  Follows
   A's threaded copy list; the successor pointer depends on whether A
   is the first or the second end of each copy.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
/* Print the copies of allocno REF to stderr; overload for use from a
   debugger.  */
DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}
1595 DEBUG_FUNCTION void
1596 debug (ira_allocno *ptr)
1598 if (ptr)
1599 debug (*ptr);
1600 else
1601 fprintf (stderr, "<nil>\n");
/* Print info about copies involving allocno A into stderr.  Debugger
   convenience wrapper.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
/* The function frees memory allocated for copy CP.  Note: unlike
   finish_pref, it does not clear the ira_copies slot.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
/* Free memory allocated for all copies, then release the copy vector
   and allocation pool.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  free_alloc_pool (copy_pool);
}
1635 /* Pools for cost vectors. It is defined only for allocno classes. */
1636 static alloc_pool cost_vector_pool[N_REG_CLASSES];
1638 /* The function initiates work with hard register cost vectors. It
1639 creates allocation pool for each allocno class. */
1640 static void
1641 initiate_cost_vectors (void)
1643 int i;
1644 enum reg_class aclass;
1646 for (i = 0; i < ira_allocno_classes_num; i++)
1648 aclass = ira_allocno_classes[i];
1649 cost_vector_pool[aclass]
1650 = create_alloc_pool ("cost vectors",
1651 sizeof (int) * ira_class_hard_regs_num[aclass],
1652 100);
/* Allocate and return a cost vector for ACLASS (one int per hard
   register of the class; the contents are uninitialized).  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}
/* Free a cost vector VEC for ACLASS.  VEC must have been allocated
   by ira_allocate_cost_vector for the same class.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}
1671 /* Finish work with hard register cost vectors. Release allocation
1672 pool for each allocno class. */
1673 static void
1674 finish_cost_vectors (void)
1676 int i;
1677 enum reg_class aclass;
1679 for (i = 0; i < ira_allocno_classes_num; i++)
1681 aclass = ira_allocno_classes[i];
1682 free_alloc_pool (cost_vector_pool[aclass]);
1688 /* Compute a post-ordering of the reverse control flow of the loop body
1689 designated by the children nodes of LOOP_NODE, whose body nodes in
1690 pre-order are input as LOOP_PREORDER. Return a VEC with a post-order
1691 of the reverse loop body.
1693 For the post-order of the reverse CFG, we visit the basic blocks in
1694 LOOP_PREORDER array in the reverse order of where they appear.
1695 This is important: We do not just want to compute a post-order of
1696 the reverse CFG, we want to make a best-guess for a visiting order that
1697 minimizes the number of chain elements per allocno live range. If the
1698 blocks would be visited in a different order, we would still compute a
1699 correct post-ordering but it would be less likely that two nodes
1700 connected by an edge in the CFG are neighbours in the topsort. */
/* Compute the post-order of the reverse CFG of the loop body given by
   LOOP_PREORDER (see the comment above for why the blocks are seeded
   in reverse pre-order).  The caller owns the returned vector.  */
static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
				  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
	 the flag to mark blocks we still have to visit to add them to
	 our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      /* Mark every block of the body as still to be visited.  */
      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      /* Iterative DFS over predecessor edges (i.e. over the reverse
	 CFG), rooted successively at each not-yet-visited block.  */
      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      /* If no unvisited predecessor was pushed, N is finished:
		 emit it in post-order.  */
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
1774 /* The current loop tree node and its regno allocno map. */
1775 ira_loop_tree_node_t ira_curr_loop_tree_node;
1776 ira_allocno_t *ira_curr_regno_allocno_map;
1778 /* This recursive function traverses loop tree with root LOOP_NODE
1779 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1780 correspondingly in preorder and postorder. The function sets up
1781 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1782 basic block nodes of LOOP_NODE is also processed (before its
1783 subloop nodes).
1785 If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1786 the loop are passed in the *reverse* post-order of the *reverse*
1787 CFG. This is only used by ira_create_allocno_live_ranges, which
1788 wants to visit basic blocks in this order to minimize the number
1789 of elements per live range chain.
1790 Note that the loop tree nodes are still visited in the normal,
1791 forward post-order of the loop tree. */
/* See the comment above: traverse the loop tree rooted at LOOP_NODE,
   calling PREORDER_FUNC/POSTORDER_FUNC on loop nodes (and, if BB_P,
   on basic-block nodes) while keeping IRA_CURR_LOOP_TREE_NODE and
   IRA_CURR_REGNO_ALLOCNO_MAP up to date.  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
	 is set up such that nodes in the loop body appear in a pre-order
	 of their place in the CFG.  */
      for (subloop_node = loop_node->children;
	   subloop_node != NULL;
	   subloop_node = subloop_node->next)
	if (subloop_node->bb != NULL)
	  loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
	FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	  (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
	{
	  /* See the function comment: the blocks are passed in the
	     reverse post-order of the reverse CFG.  */
	  vec<ira_loop_tree_node_t> loop_rev_postorder =
	    ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
	  FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
	    (*postorder_func) (subloop_node);
	  loop_rev_postorder.release ();
	}
    }

  /* Recurse into the subloops.  */
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* The recursion clobbered the current-node globals; restore them
     before the post-order callback.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1853 /* The basic block currently being processed. */
1854 static basic_block curr_bb;
/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  OUTER corresponds to the parent expression of X.  */
/* Recursively create allocnos for the pseudo-registers contained in
   X.  True OUTPUT_P means that X is an lvalue.  OUTER is the parent
   expression of X (NULL when there is none or it is irrelevant); it
   is used to widen ALLOCNO_WMODE when X sits under a SUBREG.  */
static void
create_insn_allocnos (rtx x, rtx outer, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      /* Only pseudo registers get allocnos.  */
      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    {
	      a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
	      /* Remember the widest mode in which the pseudo is used
		 through a SUBREG.  */
	      if (outer != NULL && GET_CODE (outer) == SUBREG)
		{
		  machine_mode wmode = GET_MODE (outer);
		  if (GET_MODE_SIZE (wmode) > GET_MODE_SIZE (ALLOCNO_WMODE (a)))
		    ALLOCNO_WMODE (a) = wmode;
		}
	    }

	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), NULL, true);
      create_insn_allocnos (SET_SRC (x), NULL, false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      return;
    }
  else if (code == MEM)
    {
      /* An address is always a use, even in a store.  */
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }

  /* Generic walk over the remaining RTL structure.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), x, output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
    }
}
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx_insn *insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  /* Walk insns in reverse so that next_regno_allocno chains are built
     in the desired order (see create_allocnos).  */
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), NULL, false);
  /* It might be an allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  A pseudo counts only if it is live both out of the
   edge's source and into its destination.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  For a loop node it
   processes the loop's entry edges (except the latch) and exit
   edges; for a BB node it delegates to create_bb_allocnos.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      vec<edge> edges;

      ira_assert (current_loops != NULL);
      /* Loop entries: all header predecessors except the back edge.  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
	create_loop_allocnos (e);
      edges.release ();
    }
}
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent by OR-ing the modified_regnos
   bitmap into the parent's.  A no-op for the root.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
		   loop_tree_node->modified_regnos);
}
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  Only done for regional allocation (IRA_REGION_ALL/MIXED).  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  /* Accumulate A's statistics into its parent allocno.  */
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
			    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  aclass = ALLOCNO_CLASS (a);
	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     aclass,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_CLASS_COST (parent_a)
	    += ALLOCNO_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  /* Modified-regno propagation matters only when optimizing (regional
     allocation).  */
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
2093 /* The page contains function to remove some regions from a separate
2094 register allocation. We remove regions whose separate allocation
2095 will hardly improve the result. As a result we speed up regional
2096 register allocation. */
2098 /* The function changes the object in range list given by R to OBJ. */
2099 static void
2100 change_object_in_range_list (live_range_t r, ira_object_t obj)
2102 for (; r != NULL; r = r->next)
2103 r->object = obj;
/* Move all live ranges associated with allocno FROM to allocno TO,
   object by object, merging them with TO's existing ranges.  FROM's
   objects are left with empty range lists.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file,
		   " Moving ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      /* The ranges are reused in place: repoint them at TO's object,
	 merge, and detach them from FROM's object.  */
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO,
   merging the copies with TO's existing ranges.  Unlike
   move_allocno_live_ranges, FROM's ranges are left intact.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      /* Work on a copy so FROM's list is preserved.  */
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
2164 /* Return TRUE if NODE represents a loop with low register
2165 pressure. */
2166 static bool
2167 low_pressure_loop_node_p (ira_loop_tree_node_t node)
2169 int i;
2170 enum reg_class pclass;
2172 if (node->bb != NULL)
2173 return false;
2175 for (i = 0; i < ira_pressure_classes_num; i++)
2177 pclass = ira_pressure_classes[i];
2178 if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2179 && ira_class_hard_regs_num[pclass] > 1)
2180 return false;
2182 return true;
2185 #ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such a loop if the target uses stack registers
   because reg-stack.c can not deal with such edges.  */
2189 static bool
2190 loop_with_complex_edge_p (struct loop *loop)
2192 int i;
2193 edge_iterator ei;
2194 edge e;
2195 vec<edge> edges;
2196 bool res;
/* An EH edge into the loop header counts as a complex entry.  */
2198 FOR_EACH_EDGE (e, ei, loop->header->preds)
2199 if (e->flags & EDGE_EH)
2200 return true;
/* Scan the exit edges; the vector returned by get_loop_exit_edges
   is released below.  */
2201 edges = get_loop_exit_edges (loop);
2202 res = false;
2203 FOR_EACH_VEC_ELT (edges, i, e)
2204 if (e->flags & EDGE_COMPLEX)
2206 res = true;
2207 break;
2209 edges.release ();
2210 return res;
2212 #endif
2214 /* Sort loops for marking them for removal. We put already marked
2215 loops first, then less frequent loops next, and then outer loops
2216 next. */
2217 static int
2218 loop_compare_func (const void *v1p, const void *v2p)
2220 int diff;
2221 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2222 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2224 ira_assert (l1->parent != NULL && l2->parent != NULL);
2225 if (l1->to_remove_p && ! l2->to_remove_p)
2226 return -1;
2227 if (! l1->to_remove_p && l2->to_remove_p)
2228 return 1;
2229 if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
2230 return diff;
2231 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2232 return diff;
2233 /* Make sorting stable. */
2234 return l1->loop_num - l2->loop_num;
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loops with EH
   exit or enter edges are removed too because the allocation might
   require putting pseudo moves on the EH edges (we could still do this
   for pseudos with caller saved hard registers in some cases but it
   is impossible to say here or during top-down allocation pass what
   hard register the pseudos get finally).  */
2249 static void
2250 mark_loops_for_removal (void)
2252 int i, n;
2253 ira_loop_tree_node_t *sorted_loops;
2254 loop_p loop;
2256 ira_assert (current_loops != NULL);
/* Worst case: every loop of the function becomes a candidate.  */
2257 sorted_loops
2258 = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
2259 * number_of_loops (cfun));
/* Collect candidate loop nodes and compute their initial removal
   status from register pressure (and, with stack regs, edge
   complexity).  */
2260 for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
2261 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2263 if (ira_loop_nodes[i].parent == NULL)
2265 /* Don't remove the root. */
2266 ira_loop_nodes[i].to_remove_p = false;
2267 continue;
2269 sorted_loops[n++] = &ira_loop_nodes[i];
2270 ira_loop_nodes[i].to_remove_p
2271 = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
2272 && low_pressure_loop_node_p (&ira_loop_nodes[i]))
2273 #ifdef STACK_REGS
2274 || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
2275 #endif
/* Sort so marked / cheaper / outer loops come first, then mark the
   cheapest remaining loops until at most IRA_MAX_LOOPS_NUM regions
   are left.  */
2278 qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
2279 for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
2281 sorted_loops[i]->to_remove_p = true;
2282 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2283 fprintf
2284 (ira_dump_file,
2285 " Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2286 sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
2287 sorted_loops[i]->loop->header->frequency,
2288 loop_depth (sorted_loops[i]->loop),
2289 low_pressure_loop_node_p (sorted_loops[i]->parent)
2290 && low_pressure_loop_node_p (sorted_loops[i])
2291 ? "low pressure" : "cheap loop");
2293 ira_free (sorted_loops);
/* Mark all loops but the root for removal (used when regional
   allocation is disabled).  */
2297 static void
2298 mark_all_loops_for_removal (void)
2300 int i;
2301 loop_p loop;
2303 ira_assert (current_loops != NULL);
2304 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
2305 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2307 if (ira_loop_nodes[i].parent == NULL)
2309 /* Don't remove the root. */
2310 ira_loop_nodes[i].to_remove_p = false;
2311 continue;
2313 ira_loop_nodes[i].to_remove_p = true;
2314 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2315 fprintf
2316 (ira_dump_file,
2317 " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2318 ira_loop_nodes[i].loop_num,
2319 ira_loop_nodes[i].loop->header->index,
2320 ira_loop_nodes[i].loop->header->frequency,
2321 loop_depth (ira_loop_nodes[i].loop));
2325 /* Definition of vector of loop tree nodes. */
/* Vec containing references to all removed loop tree nodes; filled
   while pruning the loop tree and drained in
   remove_unnecessary_regions.  */
2328 static vec<ira_loop_tree_node_t> removed_loop_vec;
/* Vec used as a scratch stack of surviving children while the loop
   tree is being pruned.  */
2331 static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  Removed nodes are pushed onto
   removed_loop_vec; their surviving children are re-linked to the
   nearest kept ancestor via the shared children_vec stack.  */
2335 static void
2336 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
2338 unsigned int start;
2339 bool remove_p;
2340 ira_loop_tree_node_t subnode;
2342 remove_p = node->to_remove_p;
2343 if (! remove_p)
2344 children_vec.safe_push (node);
/* Remember where this node's surviving children start on the shared
   stack; the recursion below may push grandchildren of removed
   subloops.  */
2345 start = children_vec.length ();
2346 for (subnode = node->children; subnode != NULL; subnode = subnode->next)
2347 if (subnode->bb == NULL)
2348 remove_uneccesary_loop_nodes_from_loop_tree (subnode);
2349 else
2350 children_vec.safe_push (subnode);
2351 node->children = node->subloops = NULL;
2352 if (remove_p)
2354 removed_loop_vec.safe_push (node);
2355 return;
/* NODE is kept: pop and re-link all surviving children (and their
   subloops) to it.  */
2357 while (children_vec.length () > start)
2359 subnode = children_vec.pop ();
2360 subnode->parent = node;
2361 subnode->next = node->children;
2362 node->children = subnode;
2363 if (subnode->bb == NULL)
2365 subnode->subloop_next = node->subloops;
2366 node->subloops = subnode;
2371 /* Return TRUE if NODE is inside PARENT. */
2372 static bool
2373 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2375 for (node = node->parent; node != NULL; node = node->parent)
2376 if (node == parent)
2377 return true;
2378 return false;
2381 /* Sort allocnos according to their order in regno allocno list. */
2382 static int
2383 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2385 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2386 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2387 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2388 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2390 if (loop_is_inside_p (n1, n2))
2391 return -1;
2392 else if (loop_is_inside_p (n2, n1))
2393 return 1;
2394 /* If allocnos are equally good, sort by allocno numbers, so that
2395 the results of qsort leave nothing to chance. We put allocnos
2396 with higher number first in the list because it is the original
2397 order for allocnos from loops on the same levels. */
2398 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
/* Scratch array used to sort allocnos when restoring allocno order
   in a regno allocno list; allocated lazily in
   remove_unnecessary_allocnos.  */
2403 static ira_allocno_t *regno_allocnos;
/* Restore allocno order for REGNO in the regno allocno list: sort
   the allocnos of REGNO (inner-region allocnos before enclosing
   ones, see regno_allocno_order_compare_func) using the shared
   regno_allocnos scratch array, then rebuild the linked list.  */
2406 static void
2407 ira_rebuild_regno_allocno_list (int regno)
2409 int i, n;
2410 ira_allocno_t a;
2412 for (n = 0, a = ira_regno_allocno_map[regno];
2413 a != NULL;
2414 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2415 regno_allocnos[n++] = a;
2416 ira_assert (n > 0);
2417 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2418 regno_allocno_order_compare_func);
/* Re-chain the sorted allocnos and reset the list head.  */
2419 for (i = 1; i < n; i++)
2420 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2421 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2422 ira_regno_allocno_map[regno] = regno_allocnos[0];
2423 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2424 fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
/* Propagate accumulated info (hard reg conflicts, reference counts,
   frequencies, call-crossing info, and costs) from allocno FROM_A
   into allocno A.  Both allocnos must have the same allocno class.  */
2428 static void
2429 propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2431 enum reg_class aclass;
2433 merge_hard_reg_conflicts (from_a, a, false);
2434 ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2435 ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2436 ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2437 ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2438 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
2439 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
2440 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
2441 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));
2443 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2444 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
/* The bad-spill mark survives only if it holds for both allocnos.  */
2445 if (! ALLOCNO_BAD_SPILL_P (from_a))
2446 ALLOCNO_BAD_SPILL_P (a) = false;
2447 aclass = ALLOCNO_CLASS (from_a);
2448 ira_assert (aclass == ALLOCNO_CLASS (a));
2449 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2450 ALLOCNO_HARD_REG_COSTS (from_a));
2451 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2452 aclass,
2453 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2454 ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2455 ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2458 /* Remove allocnos from loops removed from the allocation
2459 consideration. */
2460 static void
2461 remove_unnecessary_allocnos (void)
2463 int regno;
2464 bool merged_p, rebuild_p;
2465 ira_allocno_t a, prev_a, next_a, parent_a;
2466 ira_loop_tree_node_t a_node, parent;
2468 merged_p = false;
2469 regno_allocnos = NULL;
2470 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
2472 rebuild_p = false;
2473 for (prev_a = NULL, a = ira_regno_allocno_map[regno];
2474 a != NULL;
2475 a = next_a)
2477 next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
2478 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2479 if (! a_node->to_remove_p)
2480 prev_a = a;
2481 else
/* Find the nearest ancestor region that is kept or that already has
   an allocno for REGNO.  */
2483 for (parent = a_node->parent;
2484 (parent_a = parent->regno_allocno_map[regno]) == NULL
2485 && parent->to_remove_p;
2486 parent = parent->parent)
2488 if (parent_a == NULL)
2490 /* There are no allocnos with the same regno in
2491 upper region -- just move the allocno to the
2492 upper region. */
2493 prev_a = a;
2494 ALLOCNO_LOOP_TREE_NODE (a) = parent;
2495 parent->regno_allocno_map[regno] = a;
2496 bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
2497 rebuild_p = true;
2499 else
2501 /* Remove the allocno and update info of allocno in
2502 the upper region. */
2503 if (prev_a == NULL)
2504 ira_regno_allocno_map[regno] = next_a;
2505 else
2506 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
2507 move_allocno_live_ranges (a, parent_a);
2508 merged_p = true;
2509 propagate_some_info_from_allocno (parent_a, a);
2510 /* Remove it from the corresponding regno allocno
2511 map to avoid info propagation of subsequent
2512 allocno into this already removed allocno. */
2513 a_node->regno_allocno_map[regno] = NULL;
2514 ira_remove_allocno_prefs (a);
2515 finish_allocno (a);
2519 if (rebuild_p)
2520 /* We need to restore the order in regno allocno list. */
2522 if (regno_allocnos == NULL)
2523 regno_allocnos
2524 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2525 * ira_allocnos_num);
2526 ira_rebuild_regno_allocno_list (regno);
/* Live range start/finish chains are stale after any merge.  */
2529 if (merged_p)
2530 ira_rebuild_start_finish_chains ();
2531 if (regno_allocnos != NULL)
2532 ira_free (regno_allocnos);
2535 /* Remove allocnos from all loops but the root. */
2536 static void
2537 remove_low_level_allocnos (void)
2539 int regno;
2540 bool merged_p, propagate_p;
2541 ira_allocno_t a, top_a;
2542 ira_loop_tree_node_t a_node, parent;
2543 ira_allocno_iterator ai;
2545 merged_p = false;
2546 FOR_EACH_ALLOCNO (a, ai)
2548 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2549 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2550 continue;
2551 regno = ALLOCNO_REGNO (a);
/* If the root region has no allocno for REGNO, just move this one
   up to the root.  */
2552 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
2554 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2555 ira_loop_tree_root->regno_allocno_map[regno] = a;
2556 continue;
/* Propagate info only when the direct parent has no allocno for
   REGNO; otherwise the info was already accumulated there.  */
2558 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2559 /* Remove the allocno and update info of allocno in the upper
2560 region. */
2561 move_allocno_live_ranges (a, top_a);
2562 merged_p = true;
2563 if (propagate_p)
2564 propagate_some_info_from_allocno (top_a, a);
/* Consistency checks on the remaining allocnos.  */
2566 FOR_EACH_ALLOCNO (a, ai)
2568 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2569 if (a_node == ira_loop_tree_root)
2570 continue;
2571 parent = a_node->parent;
2572 regno = ALLOCNO_REGNO (a);
2573 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2574 ira_assert (ALLOCNO_CAP (a) != NULL);
2575 else if (ALLOCNO_CAP (a) == NULL)
2576 ira_assert (parent->regno_allocno_map[regno] != NULL);
/* Keep only root-level allocnos; finalize their accumulated
   attributes and free the rest.  */
2578 FOR_EACH_ALLOCNO (a, ai)
2580 regno = ALLOCNO_REGNO (a);
2581 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2583 ira_object_t obj;
2584 ira_allocno_object_iterator oi;
2586 ira_regno_allocno_map[regno] = a;
2587 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2588 ALLOCNO_CAP_MEMBER (a) = NULL;
2589 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2590 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2591 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2592 #ifdef STACK_REGS
2593 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2594 ALLOCNO_NO_STACK_REG_P (a) = true;
2595 #endif
2597 else
2599 ira_remove_allocno_prefs (a);
2600 finish_allocno (a);
2603 if (merged_p)
2604 ira_rebuild_start_finish_chains ();
2607 /* Remove loops from consideration. We remove all loops except for
2608 root if ALL_P or loops for which a separate allocation will not
2609 improve the result. We have to do this after allocno creation and
2610 their costs and allocno class evaluation because only after that
2611 the register pressure can be known and is calculated. */
2612 static void
2613 remove_unnecessary_regions (bool all_p)
2615 if (current_loops == NULL)
2616 return;
2617 if (all_p)
2618 mark_all_loops_for_removal ();
2619 else
2620 mark_loops_for_removal ();
/* Reserve room for every basic-block node plus every loop node.  */
2621 children_vec.create (last_basic_block_for_fn (cfun)
2622 + number_of_loops (cfun))
2623 removed_loop_vec.create (last_basic_block_for_fn (cfun)
2624 + number_of_loops (cfun));
2625 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
2626 children_vec.release ();
2627 if (all_p)
2628 remove_low_level_allocnos ();
2629 else
2630 remove_unnecessary_allocnos ();
/* Free the removed loop tree nodes only after their allocnos have
   been processed above.  */
2631 while (removed_loop_vec.length () > 0)
2632 finish_loop_tree_node (removed_loop_vec.pop ());
2633 removed_loop_vec.release ();
/* At this point a true value of allocno attribute bad_spill_p means
   that there is an insn where the allocno occurs and where the
   allocno can not be used as memory.  The function updates the
   attribute; afterwards it can remain true only for allocnos which
   can not be used as memory in an insn and within whose live ranges
   no other allocno of the same class dies.  Spilling allocnos with
   true value will not improve the code because it will not make
   other allocnos colorable and additional reloads for the
   corresponding pseudo will be generated in the reload pass for each
   insn in which it occurs.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based on
   graph coloring. */
2651 static void
2652 update_bad_spill_attribute (void)
2654 int i;
2655 ira_allocno_t a;
2656 ira_allocno_iterator ai;
2657 ira_allocno_object_iterator aoi;
2658 ira_object_t obj;
2659 live_range_t r;
2660 enum reg_class aclass;
2661 bitmap_head dead_points[N_REG_CLASSES];
/* Collect, per allocno class, the program points where some live
   range of that class finishes (an allocno death).  */
2663 for (i = 0; i < ira_allocno_classes_num; i++)
2665 aclass = ira_allocno_classes[i];
2666 bitmap_initialize (&dead_points[aclass], &reg_obstack);
2668 FOR_EACH_ALLOCNO (a, ai)
2670 aclass = ALLOCNO_CLASS (a);
2671 if (aclass == NO_REGS)
2672 continue;
2673 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2674 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2675 bitmap_set_bit (&dead_points[aclass], r->finish);
/* Clear bad_spill_p for any allocno that has another death strictly
   inside one of its live ranges.  */
2677 FOR_EACH_ALLOCNO (a, ai)
2679 aclass = ALLOCNO_CLASS (a);
2680 if (aclass == NO_REGS)
2681 continue;
2682 if (! ALLOCNO_BAD_SPILL_P (a))
2683 continue;
2684 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2686 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2688 for (i = r->start + 1; i < r->finish; i++)
2689 if (bitmap_bit_p (&dead_points[aclass], i))
2690 break;
2691 if (i < r->finish)
2692 break;
/* R != NULL here means a death point was found inside a range.  */
2694 if (r != NULL)
2696 ALLOCNO_BAD_SPILL_P (a) = false;
2697 break;
2701 for (i = 0; i < ira_allocno_classes_num; i++)
2703 aclass = ira_allocno_classes[i];
2704 bitmap_clear (&dead_points[aclass]);
2710 /* Set up minimal and maximal live range points for allocnos. */
2711 static void
2712 setup_min_max_allocno_live_range_point (void)
2714 int i;
2715 ira_allocno_t a, parent_a, cap;
2716 ira_allocno_iterator ai;
2717 #ifdef ENABLE_IRA_CHECKING
2718 ira_object_iterator oi;
2719 ira_object_t obj;
2720 #endif
2721 live_range_t r;
2722 ira_loop_tree_node_t parent;
/* Initialize OBJECT_MIN/OBJECT_MAX from each object's own live range
   list: the max comes from the head range's finish and the min from
   the tail range's start.  */
2724 FOR_EACH_ALLOCNO (a, ai)
2726 int n = ALLOCNO_NUM_OBJECTS (a);
2728 for (i = 0; i < n; i++)
2730 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2731 r = OBJECT_LIVE_RANGES (obj);
2732 if (r == NULL)
2733 continue;
2734 OBJECT_MAX (obj) = r->finish;
2735 for (; r->next != NULL; r = r->next)
2737 OBJECT_MIN (obj) = r->start;
/* Walk regno allocno lists (inner regions first) and widen the span
   of each object's parent-region object or its cap chain.  */
2740 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2741 for (a = ira_regno_allocno_map[i];
2742 a != NULL;
2743 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2745 int j;
2746 int n = ALLOCNO_NUM_OBJECTS (a);
2748 for (j = 0; j < n; j++)
2750 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2751 ira_object_t parent_obj;
2753 if (OBJECT_MAX (obj) < 0)
2754 continue;
2755 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2756 /* Accumulation of range info. */
2757 if (ALLOCNO_CAP (a) != NULL)
2759 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2761 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2762 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2763 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2764 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2765 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2767 continue;
2769 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2770 continue;
2771 parent_a = parent->regno_allocno_map[i];
2772 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2773 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2774 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2775 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2776 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
2779 #ifdef ENABLE_IRA_CHECKING
/* Verify every object ended up with a [min, max] span inside
   [0, ira_max_point].  */
2780 FOR_EACH_OBJECT (obj, oi)
2782 if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
2783 && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
2784 continue;
2785 gcc_unreachable ();
2787 #endif
2790 /* Sort allocnos according to their live ranges. Allocnos with
2791 smaller allocno class are put first unless we use priority
2792 coloring. Allocnos with the same class are ordered according
2793 their start (min). Allocnos with the same start are ordered
2794 according their finish (max). */
2795 static int
2796 object_range_compare_func (const void *v1p, const void *v2p)
2798 int diff;
2799 ira_object_t obj1 = *(const ira_object_t *) v1p;
2800 ira_object_t obj2 = *(const ira_object_t *) v2p;
2801 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2802 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2804 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2805 return diff;
2806 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2807 return diff;
2808 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2811 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2812 static void
2813 sort_conflict_id_map (void)
2815 int i, num;
2816 ira_allocno_t a;
2817 ira_allocno_iterator ai;
/* Gather every object of every allocno into ira_object_id_map.  */
2819 num = 0;
2820 FOR_EACH_ALLOCNO (a, ai)
2822 ira_allocno_object_iterator oi;
2823 ira_object_t obj;
2825 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2826 ira_object_id_map[num++] = obj;
2828 if (num > 1)
2829 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2830 object_range_compare_func);
/* A conflict id is the object's position in the sorted order.  */
2831 for (i = 0; i < num; i++)
2833 ira_object_t obj = ira_object_id_map[i];
2835 gcc_assert (obj != NULL);
2836 OBJECT_CONFLICT_ID (obj) = i;
/* NUM may be smaller than ira_objects_num; clear the tail.  */
2838 for (i = num; i < ira_objects_num; i++)
2839 ira_object_id_map[i] = NULL;
2842 /* Set up minimal and maximal conflict ids of allocnos with which
2843 given allocno can conflict. */
2844 static void
2845 setup_min_max_conflict_allocno_ids (void)
2847 int aclass;
2848 int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2849 int *live_range_min, *last_lived;
2850 int word0_min, word0_max;
2851 ira_allocno_t a;
2852 ira_allocno_iterator ai;
2854 live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2855 aclass = -1;
2856 first_not_finished = -1;
/* Forward pass over objects in conflict id order: compute for each
   object the smallest conflict id whose range can still overlap it,
   stashing the original OBJECT_MIN in LIVE_RANGE_MIN for the
   backward pass.  */
2857 for (i = 0; i < ira_objects_num; i++)
2859 ira_object_t obj = ira_object_id_map[i];
2861 if (obj == NULL)
2862 continue;
2864 a = OBJECT_ALLOCNO (obj);
2866 if (aclass < 0)
2868 aclass = ALLOCNO_CLASS (a);
2869 min = i;
2870 first_not_finished = i;
2872 else
2874 start = OBJECT_MIN (obj);
2875 /* If we skip an allocno, the allocno with smaller ids will
2876 be also skipped because of the secondary sorting the
2877 range finishes (see function
2878 object_range_compare_func). */
2879 while (first_not_finished < i
2880 && start > OBJECT_MAX (ira_object_id_map
2881 [first_not_finished]))
2882 first_not_finished++;
2883 min = first_not_finished;
2885 if (min == i)
2886 /* We could increase min further in this case but it is good
2887 enough. */
2888 min++;
2889 live_range_min[i] = OBJECT_MIN (obj);
2890 OBJECT_MIN (obj) = min;
/* Backward pass: compute the maximal conflicting id for each object
   using LAST_LIVED, which maps a program point to the id of the last
   processed object live there.  */
2892 last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2893 aclass = -1;
2894 filled_area_start = -1;
2895 for (i = ira_objects_num - 1; i >= 0; i--)
2897 ira_object_t obj = ira_object_id_map[i];
2899 if (obj == NULL)
2900 continue;
2902 a = OBJECT_ALLOCNO (obj);
2903 if (aclass < 0)
2905 aclass = ALLOCNO_CLASS (a);
2906 for (j = 0; j < ira_max_point; j++)
2907 last_lived[j] = -1;
2908 filled_area_start = ira_max_point;
2910 min = live_range_min[i];
2911 finish = OBJECT_MAX (obj);
2912 max = last_lived[finish];
2913 if (max < 0)
2914 /* We could decrease max further in this case but it is good
2915 enough. */
2916 max = OBJECT_CONFLICT_ID (obj) - 1;
2917 OBJECT_MAX (obj) = max;
/* In filling LAST_LIVED we can go beyond this range's finish to
   recognize intersection quickly: if the finish of a subsequently
   processed object's range (it has a smaller conflict id) lies
   beyond this range's finish, the ranges definitely intersect,
   because objects with bigger conflict ids have range starts not
   smaller than those of objects with smaller ids.  */
2925 for (j = min; j < filled_area_start; j++)
2926 last_lived[j] = i;
2927 filled_area_start = min;
2929 ira_free (last_lived);
2930 ira_free (live_range_min);
2932 /* For allocnos with more than one object, we may later record extra conflicts in
2933 subobject 0 that we cannot really know about here.
2934 For now, simply widen the min/max range of these subobjects. */
2936 word0_min = INT_MAX;
2937 word0_max = INT_MIN;
2939 FOR_EACH_ALLOCNO (a, ai)
2941 int n = ALLOCNO_NUM_OBJECTS (a);
2942 ira_object_t obj0;
2944 if (n < 2)
2945 continue;
2946 obj0 = ALLOCNO_OBJECT (a, 0);
2947 if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2948 word0_min = OBJECT_CONFLICT_ID (obj0);
2949 if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2950 word0_max = OBJECT_CONFLICT_ID (obj0);
2952 FOR_EACH_ALLOCNO (a, ai)
2954 int n = ALLOCNO_NUM_OBJECTS (a);
2955 ira_object_t obj0;
2957 if (n < 2)
2958 continue;
2959 obj0 = ALLOCNO_OBJECT (a, 0);
2960 if (OBJECT_MIN (obj0) > word0_min)
2961 OBJECT_MIN (obj0) = word0_min;
2962 if (OBJECT_MAX (obj0) < word0_max)
2963 OBJECT_MAX (obj0) = word0_max;
/* Create cap allocnos: for each allocno outside the root region,
   create a cap in the parent region unless the allocno already has a
   cap or its number is in the region's border_allocnos bitmap
   (presumably allocnos living across the region border — confirm
   against ira-int.h).  Caps of caps are created too.  */
2969 static void
2970 create_caps (void)
2972 ira_allocno_t a;
2973 ira_allocno_iterator ai;
2974 ira_loop_tree_node_t loop_tree_node;
2976 FOR_EACH_ALLOCNO (a, ai)
2978 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2979 continue;
2980 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2981 create_cap_allocno (a);
2982 else if (ALLOCNO_CAP (a) == NULL)
2984 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2985 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2986 create_cap_allocno (a);
/* The page contains code transforming more than one region internal
   representation (IR) to one region IR which is necessary for reload.
   This transformation is called IR flattening.  We might just rebuild
   the IR for one region but we don't do it because it takes a lot of
   time.  */

/* Map: regno -> allocno which will finally represent the regno for
   IR with one region.  */
3001 static ira_allocno_t *regno_top_level_allocno_map;
3003 /* Find the allocno that corresponds to A at a level one higher up in the
3004 loop tree. Returns NULL if A is a cap, or if it has no parent. */
3005 ira_allocno_t
3006 ira_parent_allocno (ira_allocno_t a)
3008 ira_loop_tree_node_t parent;
3010 if (ALLOCNO_CAP (a) != NULL)
3011 return NULL;
3013 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3014 if (parent == NULL)
3015 return NULL;
3017 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3020 /* Find the allocno that corresponds to A at a level one higher up in the
3021 loop tree. If ALLOCNO_CAP is set for A, return that. */
3022 ira_allocno_t
3023 ira_parent_or_cap_allocno (ira_allocno_t a)
3025 if (ALLOCNO_CAP (a) != NULL)
3026 return ALLOCNO_CAP (a);
3028 return ira_parent_allocno (a);
3031 /* Process all allocnos originated from pseudo REGNO and copy live
3032 ranges, hard reg conflicts, and allocno stack reg attributes from
3033 low level allocnos to final allocnos which are destinations of
3034 removed stores at a loop exit. Return true if we copied live
3035 ranges. */
3036 static bool
3037 copy_info_to_removed_store_destinations (int regno)
3039 ira_allocno_t a;
3040 ira_allocno_t parent_a = NULL;
3041 ira_loop_tree_node_t parent;
3042 bool merged_p;
3044 merged_p = false;
3045 for (a = ira_regno_allocno_map[regno];
3046 a != NULL;
3047 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3049 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
3050 /* This allocno will be removed. */
3051 continue;
3053 /* Caps will be removed. */
3054 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
/* Walk up the loop tree looking for a surviving ancestor allocno
   that is the destination of a removed (memory-optimized) store.  */
3055 for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3056 parent != NULL;
3057 parent = parent->parent)
3058 if ((parent_a = parent->regno_allocno_map[regno]) == NULL
3059 || (parent_a
3060 == regno_top_level_allocno_map[REGNO
3061 (allocno_emit_reg (parent_a))]
3062 && ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
3063 break;
3064 if (parent == NULL || parent_a == NULL)
3065 continue;
/* Copy ranges and accumulate conflict/call/pressure info into the
   surviving destination allocno.  */
3067 copy_allocno_live_ranges (a, parent_a);
3068 merge_hard_reg_conflicts (a, parent_a, true);
3070 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
3071 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3072 += ALLOCNO_CALLS_CROSSED_NUM (a);
3073 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3074 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3075 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
3076 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
3077 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3078 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3079 merged_p = true;
3081 return merged_p;
3084 /* Flatten the IR. In other words, this function transforms IR as if
3085 it were built with one region (without loops). We could make it
3086 much simpler by rebuilding IR with one region, but unfortunately it
3087 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
3088 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
3089 IRA_MAX_POINT before emitting insns on the loop borders. */
3090 void
3091 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
3093 int i, j;
3094 bool keep_p;
3095 int hard_regs_num;
3096 bool new_pseudos_p, merged_p, mem_dest_p;
3097 unsigned int n;
3098 enum reg_class aclass;
3099 ira_allocno_t a, parent_a, first, second, node_first, node_second;
3100 ira_copy_t cp;
3101 ira_loop_tree_node_t node;
3102 live_range_t r;
3103 ira_allocno_iterator ai;
3104 ira_copy_iterator ci;
3106 regno_top_level_allocno_map
3107 = (ira_allocno_t *) ira_allocate (max_reg_num ()
3108 * sizeof (ira_allocno_t));
3109 memset (regno_top_level_allocno_map, 0,
3110 max_reg_num () * sizeof (ira_allocno_t));
3111 new_pseudos_p = merged_p = false;
3112 FOR_EACH_ALLOCNO (a, ai)
3114 ira_allocno_object_iterator oi;
3115 ira_object_t obj;
3117 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3118 /* Caps are not in the regno allocno maps and they are never
3119 will be transformed into allocnos existing after IR
3120 flattening. */
3121 continue;
3122 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3123 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
3124 OBJECT_CONFLICT_HARD_REGS (obj));
3125 #ifdef STACK_REGS
3126 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
3127 #endif
3129 /* Fix final allocno attributes. */
3130 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
3132 mem_dest_p = false;
3133 for (a = ira_regno_allocno_map[i];
3134 a != NULL;
3135 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3137 ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
3139 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3140 if (data->somewhere_renamed_p)
3141 new_pseudos_p = true;
3142 parent_a = ira_parent_allocno (a);
3143 if (parent_a == NULL)
3145 ALLOCNO_COPIES (a) = NULL;
3146 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3147 continue;
3149 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
3151 if (data->mem_optimized_dest != NULL)
3152 mem_dest_p = true;
3153 parent_data = ALLOCNO_EMIT_DATA (parent_a);
3154 if (REGNO (data->reg) == REGNO (parent_data->reg))
3156 merge_hard_reg_conflicts (a, parent_a, true);
3157 move_allocno_live_ranges (a, parent_a);
3158 merged_p = true;
3159 parent_data->mem_optimized_dest_p
3160 = (parent_data->mem_optimized_dest_p
3161 || data->mem_optimized_dest_p);
3162 continue;
3164 new_pseudos_p = true;
3165 for (;;)
3167 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
3168 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
3169 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
3170 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3171 -= ALLOCNO_CALLS_CROSSED_NUM (a);
3172 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3173 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3174 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3175 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3176 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
3177 && ALLOCNO_NREFS (parent_a) >= 0
3178 && ALLOCNO_FREQ (parent_a) >= 0);
3179 aclass = ALLOCNO_CLASS (parent_a);
3180 hard_regs_num = ira_class_hard_regs_num[aclass];
3181 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
3182 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
3183 for (j = 0; j < hard_regs_num; j++)
3184 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
3185 -= ALLOCNO_HARD_REG_COSTS (a)[j];
3186 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
3187 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
3188 for (j = 0; j < hard_regs_num; j++)
3189 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
3190 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
3191 ALLOCNO_CLASS_COST (parent_a)
3192 -= ALLOCNO_CLASS_COST (a);
3193 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
3194 parent_a = ira_parent_allocno (parent_a);
3195 if (parent_a == NULL)
3196 break;
3198 ALLOCNO_COPIES (a) = NULL;
3199 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3201 if (mem_dest_p && copy_info_to_removed_store_destinations (i))
3202 merged_p = true;
3204 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
3205 if (merged_p || ira_max_point_before_emit != ira_max_point)
3206 ira_rebuild_start_finish_chains ();
3207 if (new_pseudos_p)
3209 sparseset objects_live;
3211 /* Rebuild conflicts. */
3212 FOR_EACH_ALLOCNO (a, ai)
3214 ira_allocno_object_iterator oi;
3215 ira_object_t obj;
3217 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3218 || ALLOCNO_CAP_MEMBER (a) != NULL)
3219 continue;
3220 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3222 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3223 ira_assert (r->object == obj);
3224 clear_conflicts (obj);
3227 objects_live = sparseset_alloc (ira_objects_num);
3228 for (i = 0; i < ira_max_point; i++)
3230 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
3232 ira_object_t obj = r->object;
3234 a = OBJECT_ALLOCNO (obj);
3235 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3236 || ALLOCNO_CAP_MEMBER (a) != NULL)
3237 continue;
3239 aclass = ALLOCNO_CLASS (a);
3240 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
3242 ira_object_t live_obj = ira_object_id_map[n];
3243 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
3244 enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
3246 if (ira_reg_classes_intersect_p[aclass][live_aclass]
3247 /* Don't set up conflict for the allocno with itself. */
3248 && live_a != a)
3249 ira_add_conflict (obj, live_obj);
3251 sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
3254 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
3255 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
3257 sparseset_free (objects_live);
3258 compress_conflict_vecs ();
3260 /* Mark some copies for removing and change allocnos in the rest
3261 copies. */
3262 FOR_EACH_COPY (cp, ci)
3264 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
3265 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
3267 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3268 fprintf
3269 (ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
3270 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
3271 ALLOCNO_NUM (cp->first),
3272 REGNO (allocno_emit_reg (cp->first)),
3273 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
3274 ALLOCNO_NUM (cp->second),
3275 REGNO (allocno_emit_reg (cp->second)));
3276 cp->loop_tree_node = NULL;
3277 continue;
3279 first
3280 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
3281 second
3282 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
3283 node = cp->loop_tree_node;
3284 if (node == NULL)
3285 keep_p = true; /* It copy generated in ira-emit.c. */
3286 else
3288 /* Check that the copy was not propagated from level on
3289 which we will have different pseudos. */
3290 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
3291 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
3292 keep_p = ((REGNO (allocno_emit_reg (first))
3293 == REGNO (allocno_emit_reg (node_first)))
3294 && (REGNO (allocno_emit_reg (second))
3295 == REGNO (allocno_emit_reg (node_second))));
3297 if (keep_p)
3299 cp->loop_tree_node = ira_loop_tree_root;
3300 cp->first = first;
3301 cp->second = second;
3303 else
3305 cp->loop_tree_node = NULL;
3306 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3307 fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
3308 cp->num, ALLOCNO_NUM (cp->first),
3309 REGNO (allocno_emit_reg (cp->first)),
3310 ALLOCNO_NUM (cp->second),
3311 REGNO (allocno_emit_reg (cp->second)));
3314 /* Remove unnecessary allocnos on lower levels of the loop tree. */
3315 FOR_EACH_ALLOCNO (a, ai)
3317 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3318 || ALLOCNO_CAP_MEMBER (a) != NULL)
3320 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3321 fprintf (ira_dump_file, " Remove a%dr%d\n",
3322 ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
3323 ira_remove_allocno_prefs (a);
3324 finish_allocno (a);
3325 continue;
3327 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
3328 ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
3329 ALLOCNO_CAP (a) = NULL;
3330 /* Restore updated costs for assignments from reload. */
3331 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3332 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
3333 if (! ALLOCNO_ASSIGNED_P (a))
3334 ira_free_allocno_updated_costs (a);
3335 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3336 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3338 /* Remove unnecessary copies. */
3339 FOR_EACH_COPY (cp, ci)
3341 if (cp->loop_tree_node == NULL)
3343 ira_copies[cp->num] = NULL;
3344 finish_copy (cp);
3345 continue;
3347 ira_assert
3348 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
3349 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
3350 add_allocno_copy_to_list (cp);
3351 swap_allocno_copy_ends_if_necessary (cp);
3353 rebuild_regno_allocno_maps ();
3354 if (ira_max_point != ira_max_point_before_emit)
3355 ira_compress_allocno_live_ranges ();
3356 ira_free (regno_top_level_allocno_map);
3361 #ifdef ENABLE_IRA_CHECKING
3362 /* Check creation of all allocnos. Allocnos on lower levels should
3363 have allocnos or caps on all upper levels. */
3364 static void
3365 check_allocno_creation (void)
3367 ira_allocno_t a;
3368 ira_allocno_iterator ai;
3369 ira_loop_tree_node_t loop_tree_node;
3371 FOR_EACH_ALLOCNO (a, ai)
3373 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3374 ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
3375 ALLOCNO_NUM (a)));
3376 if (loop_tree_node == ira_loop_tree_root)
3377 continue;
3378 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3379 ira_assert (ALLOCNO_CAP (a) != NULL);
3380 else if (ALLOCNO_CAP (a) == NULL)
3381 ira_assert (loop_tree_node->parent
3382 ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
3383 && bitmap_bit_p (loop_tree_node->border_allocnos,
3384 ALLOCNO_NUM (a)));
3387 #endif
3389 /* Identify allocnos which prefer a register class with a single hard register.
3390 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3391 less likely to use the preferred singleton register. */
3392 static void
3393 update_conflict_hard_reg_costs (void)
3395 ira_allocno_t a;
3396 ira_allocno_iterator ai;
3397 int i, index, min;
3399 FOR_EACH_ALLOCNO (a, ai)
3401 reg_class_t aclass = ALLOCNO_CLASS (a);
3402 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3403 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3404 if (singleton < 0)
3405 continue;
3406 index = ira_class_hard_reg_index[(int) aclass][singleton];
3407 if (index < 0)
3408 continue;
3409 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3410 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3411 continue;
3412 min = INT_MAX;
3413 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3414 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3415 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3416 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3417 if (min == INT_MAX)
3418 continue;
3419 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3420 aclass, 0);
3421 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3422 -= min - ALLOCNO_CLASS_COST (a);
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing all function and the basic
   blocks) for regional allocation.  A true return means that we
   really need to flatten IR before the reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  /* Set up the basic IR objects: cost vectors, allocnos, preferences,
     copies, and the loop tree over the CFG.  */
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  /* First pass (false): drop regions not worth separate allocation
     before compressing live ranges.  */
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: push allocno info up the loop tree and
	 build caps for allocnos living through subloops.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  /* Build the conflict graph from the live ranges, then bias
     conflict costs away from preferred singleton registers.  */
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      /* Dump summary statistics: conflict count (n), live range
	 count (nr), and multi-object allocno count (nr_big).  */
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
/* Release the data created by function ira_build.  Tears down, in
   order: loop tree nodes, preferences, copies, allocnos, cost
   vectors, and finally the allocno live ranges.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}