/* IRA conflict builder.
   Copyright (C) 2006-2020 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "rtl.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "sparseset.h"
#include "addresses.h"

/* This file contains code responsible for allocno conflict creation,
   allocno copy creation and allocno info accumulation on upper level
   loops.  */

/* ira_allocnos_num array of arrays of bits, recording whether two
   allocnos conflict (can't go in the same hardware register).

   Some arrays will be used as conflict bit vectors of the
   corresponding allocnos; see function build_object_conflicts.  */
static IRA_INT_TYPE **conflicts;

/* Macro to test a conflict of C1 and C2 in `conflicts'.  */
#define OBJECTS_CONFLICT_P(C1, C2) \
  (OBJECT_MIN (C1) <= OBJECT_CONFLICT_ID (C2) \
   && OBJECT_CONFLICT_ID (C2) <= OBJECT_MAX (C1) \
   && TEST_MINMAX_SET_BIT (conflicts[OBJECT_CONFLICT_ID (C1)], \
                           OBJECT_CONFLICT_ID (C2), \
                           OBJECT_MIN (C1), OBJECT_MAX (C1)))
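
/* A worked example of the representation (illustrative numbers): each
   row of `conflicts' is a min/max set, i.e. a bit vector covering only
   the window [OBJECT_MIN, OBJECT_MAX] of conflict ids the object can
   meet, so bit K of the row for C1 roughly stands for conflict id
   OBJECT_MIN (C1) + K.  If, say, OBJECT_MIN is 100 and OBJECT_MAX is
   131, the row needs only 32 bits and a conflict with id 117 lands at
   bit 17.  The range test in OBJECTS_CONFLICT_P rejects ids outside
   the window before the bit test.  */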

/* Record a conflict between objects OBJ1 and OBJ2.  If necessary,
   canonicalize the conflict by recording it for lower-order subobjects
   of the corresponding allocnos.  */
static void
record_object_conflict (ira_object_t obj1, ira_object_t obj2)
{
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
  int w1 = OBJECT_SUBWORD (obj1);
  int w2 = OBJECT_SUBWORD (obj2);
  int id1, id2;

  /* Canonicalize the conflict.  If two identically-numbered words
     conflict, always record this as a conflict between words 0.  That
     is the only information we need, and it is easier to test for if
     it is collected in each allocno's lowest-order object.  */
  if (w1 == w2 && w1 > 0)
    {
      obj1 = ALLOCNO_OBJECT (a1, 0);
      obj2 = ALLOCNO_OBJECT (a2, 0);
    }
  id1 = OBJECT_CONFLICT_ID (obj1);
  id2 = OBJECT_CONFLICT_ID (obj2);

  SET_MINMAX_SET_BIT (conflicts[id1], id2, OBJECT_MIN (obj1),
                      OBJECT_MAX (obj1));
  SET_MINMAX_SET_BIT (conflicts[id2], id1, OBJECT_MIN (obj2),
                      OBJECT_MAX (obj2));
}

/* Build allocno conflict table by processing allocno live ranges.
   Return true if the table was built.  The table is not built if it
   would be too big.  */
static bool
build_conflict_bit_table (void)
{
  int i;
  unsigned int j;
  enum reg_class aclass;
  int object_set_words, allocated_words_num, conflict_bit_vec_words_num;
  live_range_t r;
  ira_allocno_t allocno;
  ira_allocno_iterator ai;
  sparseset objects_live;
  ira_object_t obj;
  ira_allocno_object_iterator aoi;

  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    FOR_EACH_ALLOCNO_OBJECT (allocno, obj, aoi)
      {
        if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
          continue;
        conflict_bit_vec_words_num
          = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
             / IRA_INT_BITS);
        allocated_words_num += conflict_bit_vec_words_num;
        if ((uint64_t) allocated_words_num * sizeof (IRA_INT_TYPE)
            > (uint64_t) param_ira_max_conflict_table_size * 1024 * 1024)
          {
            if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
              fprintf (ira_dump_file,
                       "+++Conflict table will be too big(>%dMB) -- don't use it\n",
                       param_ira_max_conflict_table_size);
            return false;
          }
      }

  conflicts = (IRA_INT_TYPE **) ira_allocate (sizeof (IRA_INT_TYPE *)
                                              * ira_objects_num);
  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    FOR_EACH_ALLOCNO_OBJECT (allocno, obj, aoi)
      {
        int id = OBJECT_CONFLICT_ID (obj);

        if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
          {
            conflicts[id] = NULL;
            continue;
          }
        conflict_bit_vec_words_num
          = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
             / IRA_INT_BITS);
        allocated_words_num += conflict_bit_vec_words_num;
        conflicts[id]
          = (IRA_INT_TYPE *) ira_allocate (sizeof (IRA_INT_TYPE)
                                           * conflict_bit_vec_words_num);
        memset (conflicts[id], 0,
                sizeof (IRA_INT_TYPE) * conflict_bit_vec_words_num);
      }

  object_set_words = (ira_objects_num + IRA_INT_BITS - 1) / IRA_INT_BITS;
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    fprintf (ira_dump_file,
             "+++Allocating %ld bytes for conflict table (uncompressed size %ld)\n",
             (long) allocated_words_num * sizeof (IRA_INT_TYPE),
             (long) object_set_words * ira_objects_num * sizeof (IRA_INT_TYPE));

  objects_live = sparseset_alloc (ira_objects_num);
  for (i = 0; i < ira_max_point; i++)
    {
      for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
        {
          ira_object_t obj = r->object;
          ira_allocno_t allocno = OBJECT_ALLOCNO (obj);
          int id = OBJECT_CONFLICT_ID (obj);

          gcc_assert (id < ira_objects_num);

          aclass = ALLOCNO_CLASS (allocno);
          EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
            {
              ira_object_t live_obj = ira_object_id_map[j];
              ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
              enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

              if (ira_reg_classes_intersect_p[aclass][live_aclass]
                  /* Don't set up a conflict for the allocno with itself.  */
                  && live_a != allocno)
                record_object_conflict (obj, live_obj);
            }
          sparseset_set_bit (objects_live, id);
        }

      for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
        sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
    }
  sparseset_free (objects_live);
  return true;
}
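
/* The loop above is a standard live-range sweep: at each program point
   the objects whose ranges start there are made to conflict with
   everything still in OBJECTS_LIVE (filtered by register-class
   intersection), and objects whose ranges finish at the point are then
   removed.  The work is therefore roughly proportional to the sum over
   program points of the number of simultaneously live objects.  */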

/* Return true iff allocnos A1 and A2 cannot be allocated to the same
   register due to conflicts.  */
static bool
allocnos_conflict_for_copy_p (ira_allocno_t a1, ira_allocno_t a2)
{
  /* Because we canonicalize conflicts (see record_object_conflict),
     we only need to test the lowest-order words.  */
  ira_object_t obj1 = ALLOCNO_OBJECT (a1, 0);
  ira_object_t obj2 = ALLOCNO_OBJECT (a2, 0);

  return OBJECTS_CONFLICT_P (obj1, obj2);
}

/* Check that X is REG or SUBREG of REG.  */
#define REG_SUBREG_P(x) \
  (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))))

/* Return X if X is a REG, otherwise X should be a SUBREG of a REG and
   the function returns the inner reg in that case.  *OFFSET will be
   set to 0 in the first case and to the regno offset in the second.  */
static rtx
go_through_subreg (rtx x, int *offset)
{
  rtx reg;

  *offset = 0;
  if (REG_P (x))
    return x;
  ira_assert (GET_CODE (x) == SUBREG);
  reg = SUBREG_REG (x);
  ira_assert (REG_P (reg));
  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    *offset = subreg_regno_offset (REGNO (reg), GET_MODE (reg),
                                   SUBREG_BYTE (x), GET_MODE (x));
  else if (!can_div_trunc_p (SUBREG_BYTE (x),
                             REGMODE_NATURAL_SIZE (GET_MODE (x)), offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner hard registers are being accessed.  */
    gcc_unreachable ();
  return reg;
}
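
/* Illustrative example: on a target with 32-bit words,
   (subreg:SI (reg:DI 100) 4) would come back as (reg:DI 100) with
   *OFFSET == 1 (the second word of the pseudo), while a plain
   (reg:SI 101) is returned unchanged with *OFFSET == 0.  For hard
   registers the offset comes from subreg_regno_offset; for pseudos it
   is SUBREG_BYTE divided by the natural register size of the outer
   mode.  */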

/* Process registers REG1 and REG2 in move INSN with execution
   frequency FREQ.  The function also processes the registers in a
   potential move insn (INSN == NULL in this case) with frequency
   FREQ.  The function can modify hard register costs of the
   corresponding allocnos or create a copy involving the corresponding
   allocnos.  The function does nothing if both registers are hard
   registers.  When nothing is changed, the function returns false.  */
static bool
process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p,
                       rtx_insn *insn, int freq)
{
  int allocno_preferenced_hard_regno, cost, index, offset1, offset2;
  bool only_regs_p;
  ira_allocno_t a;
  reg_class_t rclass, aclass;
  machine_mode mode;
  ira_copy_t cp;

  gcc_assert (REG_SUBREG_P (reg1) && REG_SUBREG_P (reg2));
  only_regs_p = REG_P (reg1) && REG_P (reg2);
  reg1 = go_through_subreg (reg1, &offset1);
  reg2 = go_through_subreg (reg2, &offset2);
  /* Set up the hard regno preferenced by the allocno.  If the allocno
     gets that hard regno, the copy (or potential move) insn will be
     removed.  */
  if (HARD_REGISTER_P (reg1))
    {
      if (HARD_REGISTER_P (reg2))
        return false;
      allocno_preferenced_hard_regno = REGNO (reg1) + offset1 - offset2;
      a = ira_curr_regno_allocno_map[REGNO (reg2)];
    }
  else if (HARD_REGISTER_P (reg2))
    {
      allocno_preferenced_hard_regno = REGNO (reg2) + offset2 - offset1;
      a = ira_curr_regno_allocno_map[REGNO (reg1)];
    }
  else
    {
      ira_allocno_t a1 = ira_curr_regno_allocno_map[REGNO (reg1)];
      ira_allocno_t a2 = ira_curr_regno_allocno_map[REGNO (reg2)];

      if (!allocnos_conflict_for_copy_p (a1, a2) && offset1 == offset2)
        {
          cp = ira_add_allocno_copy (a1, a2, freq, constraint_p, insn,
                                     ira_curr_loop_tree_node);
          bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
          return true;
        }
      else
        return false;
    }

  if (! IN_RANGE (allocno_preferenced_hard_regno,
                  0, FIRST_PSEUDO_REGISTER - 1))
    /* Cannot be tied.  */
    return false;
  rclass = REGNO_REG_CLASS (allocno_preferenced_hard_regno);
  mode = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  if (only_regs_p && insn != NULL_RTX
      && reg_class_size[rclass] <= ira_reg_class_max_nregs[rclass][mode])
    /* It is already taken into account in ira-costs.c.  */
    return false;
  index = ira_class_hard_reg_index[aclass][allocno_preferenced_hard_regno];
  if (index < 0)
    /* Cannot be tied.  It is not in the allocno class.  */
    return false;
  ira_init_register_move_cost_if_necessary (mode);
  if (HARD_REGISTER_P (reg1))
    cost = ira_register_move_cost[mode][aclass][rclass] * freq;
  else
    cost = ira_register_move_cost[mode][rclass][aclass] * freq;
  do
    {
      ira_allocate_and_set_costs
        (&ALLOCNO_HARD_REG_COSTS (a), aclass,
         ALLOCNO_CLASS_COST (a));
      ira_allocate_and_set_costs
        (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass, 0);
      ALLOCNO_HARD_REG_COSTS (a)[index] -= cost;
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= cost;
      if (ALLOCNO_HARD_REG_COSTS (a)[index] < ALLOCNO_CLASS_COST (a))
        ALLOCNO_CLASS_COST (a) = ALLOCNO_HARD_REG_COSTS (a)[index];
      ira_add_allocno_pref (a, allocno_preferenced_hard_regno, freq);
      a = ira_parent_or_cap_allocno (a);
    }
  while (a != NULL);
  return true;
}
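
/* Worked example (illustrative numbers): for a move like
   (set (reg:SI 130) (reg:SI 1)) with execution frequency FREQ, where
   reg 1 is a hard register, the function records a preference of
   pseudo 130 for hard register 1 and subtracts the register move cost
   times FREQ from that register's entry in the allocno's cost vectors,
   making it look cheaper to the coloring pass; the adjustment is
   repeated for parent and cap allocnos so enclosing regions see the
   same bias.  A pseudo-to-pseudo move instead becomes an explicit
   allocno copy.  */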

/* Return true if output operand OUTPUT and input operand INPUT of
   INSN can use the same register class for at least one alternative.
   INSN is already described in recog_data and recog_op_alt.  */
static bool
can_use_same_reg_p (rtx_insn *insn, int output, int input)
{
  alternative_mask preferred = get_preferred_alternatives (insn);

  for (int nalt = 0; nalt < recog_data.n_alternatives; nalt++)
    {
      if (!TEST_BIT (preferred, nalt))
        continue;

      const operand_alternative *op_alt
        = &recog_op_alt[nalt * recog_data.n_operands];
      if (op_alt[input].matches == output)
        return true;

      if (ira_reg_class_intersect[op_alt[input].cl][op_alt[output].cl]
          != NO_REGS)
        return true;
    }
  return false;
}
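
/* Note that this is only an approximation: it asks whether some enabled
   alternative either ties the two operands directly (matches) or allows
   register classes with a non-empty intersection, without checking the
   other operands of that alternative.  An optimistic answer here merely
   makes process_reg_shuffles consider one more candidate pairing.  */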

/* Process all of the output registers of the current insn (INSN) which
   are not bound (BOUND_P) and the input register REG (its operand number
   OP_NUM) which dies in the insn as if there were a move insn between
   them with frequency FREQ.  */
static void
process_reg_shuffles (rtx_insn *insn, rtx reg, int op_num, int freq,
                      bool *bound_p)
{
  int i;
  rtx another_reg;

  gcc_assert (REG_SUBREG_P (reg));
  for (i = 0; i < recog_data.n_operands; i++)
    {
      another_reg = recog_data.operand[i];

      if (!REG_SUBREG_P (another_reg) || op_num == i
          || recog_data.operand_type[i] != OP_OUT
          || bound_p[i]
          || (!can_use_same_reg_p (insn, i, op_num)
              && (recog_data.constraints[op_num][0] != '%'
                  || !can_use_same_reg_p (insn, i, op_num + 1))
              && (op_num == 0
                  || recog_data.constraints[op_num - 1][0] != '%'
                  || !can_use_same_reg_p (insn, i, op_num - 1))))
        continue;

      process_regs_for_copy (reg, another_reg, false, NULL, freq);
    }
}
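
/* Sketch of a shuffle (illustrative insn): for
   (set (reg 200) (plus:SI (reg 201) (reg 202))) where reg 202 dies,
   giving reg 200 the same hard register as reg 202 cannot hurt and may
   save a move, so the loop above acts as if there were a cheap copy
   between the dying input and each suitable output operand.  The '%'
   checks let commutative operands be considered in either order.  */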

/* Process INSN and create allocno copies if necessary.  For example,
   it might be because INSN is a pseudo-register move or INSN is a
   two operand insn.  */
static void
add_insn_allocno_copies (rtx_insn *insn)
{
  rtx set, operand, dup;
  bool bound_p[MAX_RECOG_OPERANDS];
  int i, n, freq;
  alternative_mask alts;

  freq = REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn));
  if (freq == 0)
    freq = 1;
  if ((set = single_set (insn)) != NULL_RTX
      && REG_SUBREG_P (SET_DEST (set)) && REG_SUBREG_P (SET_SRC (set))
      && ! side_effects_p (set)
      && find_reg_note (insn, REG_DEAD,
                        REG_P (SET_SRC (set))
                        ? SET_SRC (set)
                        : SUBREG_REG (SET_SRC (set))) != NULL_RTX)
    {
      process_regs_for_copy (SET_SRC (set), SET_DEST (set),
                             false, insn, freq);
      return;
    }
  /* Fast check of possibility of constraint or shuffle copies.  If
     there are no dead registers, there will be no such copies.  */
  if (! find_reg_note (insn, REG_DEAD, NULL_RTX))
    return;
  alts = ira_setup_alts (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    bound_p[i] = false;
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (! REG_SUBREG_P (operand))
        continue;
      if ((n = ira_get_dup_out_num (i, alts)) >= 0)
        {
          bound_p[n] = true;
          dup = recog_data.operand[n];
          if (REG_SUBREG_P (dup)
              && find_reg_note (insn, REG_DEAD,
                                REG_P (operand)
                                ? operand
                                : SUBREG_REG (operand)) != NULL_RTX)
            process_regs_for_copy (operand, dup, true, NULL, freq);
        }
    }
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (REG_SUBREG_P (operand)
          && find_reg_note (insn, REG_DEAD,
                            REG_P (operand)
                            ? operand : SUBREG_REG (operand)) != NULL_RTX)
        /* If an operand dies, prefer its hard register for the output
           operands by decreasing the hard register cost or creating
           the corresponding allocno copies.  The cost will not
           correspond to a real move insn cost, so make the frequency
           smaller.  */
        process_reg_shuffles (insn, operand, i, freq < 8 ? 1 : freq / 8,
                              bound_p);
    }
}
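
/* Two kinds of copies originate here: a real register move such as
   (set (reg 300) (reg 301)) whose source dies yields a copy with
   CONSTRAINT_P false and the insn attached, while a matching constraint
   like "0" on a two-operand insn (found through ira_get_dup_out_num)
   yields a constraint copy with CONSTRAINT_P true.  Anything else that
   merely involves a dying register is handled as a shuffle with a
   reduced frequency through process_reg_shuffles.  */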

/* Add copies originated from BB given by LOOP_TREE_NODE.  */
static void
add_copies (ira_loop_tree_node_t loop_tree_node)
{
  basic_block bb;
  rtx_insn *insn;

  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      add_insn_allocno_copies (insn);
}

/* Propagate copies to the corresponding allocnos on the upper loop
   tree level.  */
static void
propagate_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;
  ira_allocno_t a1, a2, parent_a1, parent_a2;

  FOR_EACH_COPY (cp, ci)
    {
      a1 = cp->first;
      a2 = cp->second;
      if (ALLOCNO_LOOP_TREE_NODE (a1) == ira_loop_tree_root)
        continue;
      ira_assert ((ALLOCNO_LOOP_TREE_NODE (a2) != ira_loop_tree_root));
      parent_a1 = ira_parent_or_cap_allocno (a1);
      parent_a2 = ira_parent_or_cap_allocno (a2);
      ira_assert (parent_a1 != NULL && parent_a2 != NULL);
      if (! allocnos_conflict_for_copy_p (parent_a1, parent_a2))
        ira_add_allocno_copy (parent_a1, parent_a2, cp->freq,
                              cp->constraint_p, cp->insn,
                              cp->loop_tree_node);
    }
}
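
/* Propagation example: a copy recorded in an inner loop between
   allocnos A1 and A2 is mirrored between their parent (or cap)
   allocnos in the enclosing region, unless those parents are already
   known to conflict, so the same coalescing opportunity stays visible
   when the enclosing region is colored.  */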

/* Array used to collect all conflict allocnos for given allocno.  */
static ira_object_t *collected_conflict_objects;

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) for object OBJ from the conflict table.  */
static void
build_object_conflicts (ira_object_t obj)
{
  int i, px, parent_num;
  ira_allocno_t parent_a, another_parent_a;
  ira_object_t parent_obj;
  ira_allocno_t a = OBJECT_ALLOCNO (obj);
  IRA_INT_TYPE *object_conflicts;
  minmax_set_iterator asi;
  int parent_min, parent_max ATTRIBUTE_UNUSED;

  object_conflicts = conflicts[OBJECT_CONFLICT_ID (obj)];

  px = 0;
  FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
                              OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
    {
      ira_object_t another_obj = ira_object_id_map[i];
      ira_allocno_t another_a = OBJECT_ALLOCNO (another_obj);

      ira_assert (ira_reg_classes_intersect_p
                  [ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);
      collected_conflict_objects[px++] = another_obj;
    }
  if (ira_conflict_vector_profitable_p (obj, px))
    {
      ira_object_t *vec;

      ira_allocate_conflict_vec (obj, px);
      vec = OBJECT_CONFLICT_VEC (obj);
      memcpy (vec, collected_conflict_objects, sizeof (ira_object_t) * px);
      vec[px] = NULL;
      OBJECT_NUM_CONFLICTS (obj) = px;
    }
  else
    {
      int conflict_bit_vec_words_num;

      OBJECT_CONFLICT_ARRAY (obj) = object_conflicts;
      if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
        conflict_bit_vec_words_num = 0;
      else
        conflict_bit_vec_words_num
          = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
             / IRA_INT_BITS);
      OBJECT_CONFLICT_ARRAY_SIZE (obj)
        = conflict_bit_vec_words_num * sizeof (IRA_INT_TYPE);
    }

  parent_a = ira_parent_or_cap_allocno (a);
  if (parent_a == NULL)
    return;
  ira_assert (ALLOCNO_CLASS (a) == ALLOCNO_CLASS (parent_a));
  ira_assert (ALLOCNO_NUM_OBJECTS (a) == ALLOCNO_NUM_OBJECTS (parent_a));
  parent_obj = ALLOCNO_OBJECT (parent_a, OBJECT_SUBWORD (obj));
  parent_num = OBJECT_CONFLICT_ID (parent_obj);
  parent_min = OBJECT_MIN (parent_obj);
  parent_max = OBJECT_MAX (parent_obj);
  FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
                              OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
    {
      ira_object_t another_obj = ira_object_id_map[i];
      ira_allocno_t another_a = OBJECT_ALLOCNO (another_obj);
      int another_word = OBJECT_SUBWORD (another_obj);

      ira_assert (ira_reg_classes_intersect_p
                  [ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);

      another_parent_a = ira_parent_or_cap_allocno (another_a);
      if (another_parent_a == NULL)
        continue;
      ira_assert (ALLOCNO_NUM (another_parent_a) >= 0);
      ira_assert (ALLOCNO_CLASS (another_a)
                  == ALLOCNO_CLASS (another_parent_a));
      ira_assert (ALLOCNO_NUM_OBJECTS (another_a)
                  == ALLOCNO_NUM_OBJECTS (another_parent_a));
      SET_MINMAX_SET_BIT (conflicts[parent_num],
                          OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (another_parent_a,
                                                              another_word)),
                          parent_min, parent_max);
    }
}
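
/* Representation choice: for objects with few conflicts a plain vector
   of conflicting objects is smaller and faster to walk, so
   ira_conflict_vector_profitable_p chooses between the vector form and
   simply keeping the bit-vector row that build_conflict_bit_table
   already allocated.  In the latter case the row is adopted in place,
   which is why ira_build_conflicts later frees only the rows that were
   not adopted here.  */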

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) of all allocnos from the conflict table.  */
static void
build_conflicts (void)
{
  int i;
  ira_allocno_t a, cap;

  collected_conflict_objects
    = (ira_object_t *) ira_allocate (sizeof (ira_object_t)
                                     * ira_objects_num);
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
        int j, nregs = ALLOCNO_NUM_OBJECTS (a);

        for (j = 0; j < nregs; j++)
          {
            ira_object_t obj = ALLOCNO_OBJECT (a, j);

            build_object_conflicts (obj);
            for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
              {
                ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);

                gcc_assert (ALLOCNO_NUM_OBJECTS (cap) == ALLOCNO_NUM_OBJECTS (a));
                build_object_conflicts (cap_obj);
              }
          }
      }
  ira_free (collected_conflict_objects);
}

/* Print hard reg set SET with TITLE to FILE.  */
static void
print_hard_reg_set (FILE *file, const char *title, HARD_REG_SET set)
{
  int i, start, end;

  fputs (title, file);
  for (start = end = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      bool reg_included = TEST_HARD_REG_BIT (set, i);

      if (reg_included)
        {
          if (start == -1)
            start = i;
          end = i;
        }
      if (start >= 0 && (!reg_included || i == FIRST_PSEUDO_REGISTER - 1))
        {
          if (start == end)
            fprintf (file, " %d", start);
          else if (end == start + 1)
            fprintf (file, " %d %d", start, end);
          else
            fprintf (file, " %d-%d", start, end);
          start = -1;
        }
    }
  putc ('\n', file);
}
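
/* Example output (illustrative): a set containing hard registers 0-3,
   6 and 8-9 is printed as " 0-3 6 8 9"; a run of exactly two registers
   is printed as two separate numbers rather than as a range.  */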

/* Print information about conflicts of allocno (or only its regno if
   REG_P) A to FILE.  */
static void
print_allocno_conflicts (FILE *file, bool reg_p, ira_allocno_t a)
{
  HARD_REG_SET conflicting_hard_regs;
  basic_block bb;
  int n, i;

  if (reg_p)
    fprintf (file, ";; r%d", ALLOCNO_REGNO (a));
  else
    {
      fprintf (file, ";; a%d(r%d,", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
      if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
        fprintf (file, "b%d", bb->index);
      else
        fprintf (file, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
      putc (')', file);
    }

  fputs (" conflicts:", file);
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      if (OBJECT_CONFLICT_ARRAY (obj) == NULL)
        {
          fprintf (file, "\n;; total conflict hard regs:\n");
          fprintf (file, ";;     conflict hard regs:\n\n");
          continue;
        }

      if (n > 1)
        fprintf (file, "\n;;   subobject %d:", i);
      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
        {
          ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

          if (reg_p)
            fprintf (file, " r%d,", ALLOCNO_REGNO (conflict_a));
          else
            {
              fprintf (file, " a%d(r%d", ALLOCNO_NUM (conflict_a),
                       ALLOCNO_REGNO (conflict_a));
              if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
                fprintf (file, ",w%d", OBJECT_SUBWORD (conflict_obj));
              if ((bb = ALLOCNO_LOOP_TREE_NODE (conflict_a)->bb) != NULL)
                fprintf (file, ",b%d", bb->index);
              else
                fprintf (file, ",l%d",
                         ALLOCNO_LOOP_TREE_NODE (conflict_a)->loop_num);
              putc (')', file);
            }
        }
      conflicting_hard_regs = (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
                               & ~ira_no_alloc_regs
                               & reg_class_contents[ALLOCNO_CLASS (a)]);
      print_hard_reg_set (file, "\n;;     total conflict hard regs:",
                          conflicting_hard_regs);

      conflicting_hard_regs = (OBJECT_CONFLICT_HARD_REGS (obj)
                               & ~ira_no_alloc_regs
                               & reg_class_contents[ALLOCNO_CLASS (a)]);
      print_hard_reg_set (file, ";;     conflict hard regs:",
                          conflicting_hard_regs);
      putc ('\n', file);
    }
}

/* Print information about allocno or only regno (if REG_P) conflicts
   to FILE.  */
static void
print_conflicts (FILE *file, bool reg_p)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    print_allocno_conflicts (file, reg_p, a);
}

/* Print information about allocno or only regno (if REG_P) conflicts
   to stderr.  */
void
ira_debug_conflicts (bool reg_p)
{
  print_conflicts (stderr, reg_p);
}

/* Entry function which builds allocno conflicts and allocno copies
   and accumulates some allocno info on upper level regions.  */
void
ira_build_conflicts (void)
{
  enum reg_class base;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  HARD_REG_SET temp_hard_reg_set;

  if (ira_conflicts_p)
    {
      ira_conflicts_p = build_conflict_bit_table ();
      if (ira_conflicts_p)
        {
          ira_object_t obj;
          ira_object_iterator oi;

          build_conflicts ();
          ira_traverse_loop_tree (true, ira_loop_tree_root, add_copies, NULL);
          /* We need a finished conflict table for the subsequent call.  */
          if (flag_ira_region == IRA_REGION_ALL
              || flag_ira_region == IRA_REGION_MIXED)
            propagate_copies ();

          /* Now we can free memory for the conflict table (see function
             build_object_conflicts for details).  */
          FOR_EACH_OBJECT (obj, oi)
            {
              if (OBJECT_CONFLICT_ARRAY (obj) != conflicts[OBJECT_CONFLICT_ID (obj)])
                ira_free (conflicts[OBJECT_CONFLICT_ID (obj)]);
            }
          ira_free (conflicts);
        }
    }
  base = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, ADDRESS, SCRATCH);
  if (! targetm.class_likely_spilled_p (base))
    CLEAR_HARD_REG_SET (temp_hard_reg_set);
  else
    temp_hard_reg_set = reg_class_contents[base] & ~ira_no_alloc_regs;
  FOR_EACH_ALLOCNO (a, ai)
    {
      int i, n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          rtx allocno_reg = regno_reg_rtx[ALLOCNO_REGNO (a)];

          /* For debugging purposes don't put user defined variables in
             callee-clobbered registers.  However, do allow parameters
             in callee-clobbered registers to improve debugging.  This
             is a bit of a fragile hack.  */
          if (! optimize
              && REG_USERVAR_P (allocno_reg)
              && ! reg_is_parm_p (allocno_reg))
            {
              HARD_REG_SET new_conflict_regs = crtl->abi->full_reg_clobbers ();
              OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
              OBJECT_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
            }

          if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
            {
              HARD_REG_SET new_conflict_regs = ira_need_caller_save_regs (a);

              if (flag_caller_saves)
                new_conflict_regs &= (~savable_regs | temp_hard_reg_set);
              OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
              OBJECT_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
            }

          /* Now we deal with paradoxical subreg cases where certain registers
             cannot be accessed in the widest mode.  */
          machine_mode outer_mode = ALLOCNO_WMODE (a);
          machine_mode inner_mode = ALLOCNO_MODE (a);

          if (paradoxical_subreg_p (outer_mode, inner_mode))
            {
              enum reg_class aclass = ALLOCNO_CLASS (a);
              for (int j = ira_class_hard_regs_num[aclass] - 1; j >= 0; --j)
                {
                  int inner_regno = ira_class_hard_regs[aclass][j];
                  int outer_regno = simplify_subreg_regno (inner_regno,
                                                           inner_mode, 0,
                                                           outer_mode);
                  if (outer_regno < 0
                      || !in_hard_reg_set_p (reg_class_contents[aclass],
                                             outer_mode, outer_regno))
                    {
                      SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                                        inner_regno);
                      SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj),
                                        inner_regno);
                    }
                }
            }
        }
    }
  if (optimize && ira_conflicts_p
      && internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_conflicts (ira_dump_file, false);
}