/* IRA conflict builder.
   Copyright (C) 2006-2023 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "sparseset.h"
#include "addresses.h"
/* This file contains code responsible for allocno conflict creation,
   allocno copy creation and allocno info accumulation on upper level
   regions.  */

/* ira_allocnos_num array of arrays of bits, recording whether two
   allocnos conflict (cannot go in the same hardware register).

   Some arrays will be used as conflict bit vectors of the
   corresponding allocnos; see function build_object_conflicts.  */
static IRA_INT_TYPE **conflicts;
/* Macro to test a conflict of C1 and C2 in `conflicts'.  */
#define OBJECTS_CONFLICT_P(C1, C2) \
  (OBJECT_MIN (C1) <= OBJECT_CONFLICT_ID (C2) \
   && OBJECT_CONFLICT_ID (C2) <= OBJECT_MAX (C1) \
   && TEST_MINMAX_SET_BIT (conflicts[OBJECT_CONFLICT_ID (C1)], \
			   OBJECT_CONFLICT_ID (C2), \
			   OBJECT_MIN (C1), OBJECT_MAX (C1)))
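
/* For example, if OBJECT_MIN (C1) is 5 and OBJECT_MAX (C1) is 12, the
   row conflicts[OBJECT_CONFLICT_ID (C1)] only stores bits for conflict
   ids 5..12; a query for an id outside that window fails the range
   checks above without ever touching the bit vector.  */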
/* Record a conflict between objects OBJ1 and OBJ2.  If necessary,
   canonicalize the conflict by recording it for lower-order subobjects
   of the corresponding allocnos.  */
static void
record_object_conflict (ira_object_t obj1, ira_object_t obj2)
{
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
  int w1 = OBJECT_SUBWORD (obj1);
  int w2 = OBJECT_SUBWORD (obj2);
  int id1, id2;

  /* Canonicalize the conflict.  If two identically-numbered words
     conflict, always record this as a conflict between words 0.  That
     is the only information we need, and it is easier to test for if
     it is collected in each allocno's lowest-order object.  */
  if (w1 == w2 && w1 > 0)
    {
      obj1 = ALLOCNO_OBJECT (a1, 0);
      obj2 = ALLOCNO_OBJECT (a2, 0);
    }
  id1 = OBJECT_CONFLICT_ID (obj1);
  id2 = OBJECT_CONFLICT_ID (obj2);

  SET_MINMAX_SET_BIT (conflicts[id1], id2, OBJECT_MIN (obj1),
		      OBJECT_MAX (obj1));
  SET_MINMAX_SET_BIT (conflicts[id2], id1, OBJECT_MIN (obj2),
		      OBJECT_MAX (obj2));
}
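
/* To illustrate the canonicalization above: if two-word allocnos A and B
   conflict only in their second words (w1 == w2 == 1), the conflict is
   recorded between ALLOCNO_OBJECT (A, 0) and ALLOCNO_OBJECT (B, 0)
   instead, so later queries only need to look at the lowest-order
   objects (see allocnos_conflict_for_copy_p below).  */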
/* Build allocno conflict table by processing allocno live ranges.
   Return true if the table was built.  The table is not built if it
   is too big.  */
static bool
build_conflict_bit_table (void)
{
  int i;
  unsigned int j;
  enum reg_class aclass;
  int object_set_words, allocated_words_num, conflict_bit_vec_words_num;
  live_range_t r;
  ira_allocno_t allocno;
  ira_allocno_iterator ai;
  sparseset objects_live;
  ira_object_t obj;
  ira_allocno_object_iterator aoi;

  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    FOR_EACH_ALLOCNO_OBJECT (allocno, obj, aoi)
      {
	if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
	  continue;
	conflict_bit_vec_words_num
	  = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	     / IRA_INT_BITS);
	allocated_words_num += conflict_bit_vec_words_num;
	if ((uint64_t) allocated_words_num * sizeof (IRA_INT_TYPE)
	    > (uint64_t) param_ira_max_conflict_table_size * 1024 * 1024)
	  {
	    if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	      fprintf
		(ira_dump_file,
		 "+++Conflict table will be too big(>%dMB) -- don't use it\n",
		 param_ira_max_conflict_table_size);
	    return false;
	  }
      }

  conflicts = (IRA_INT_TYPE **) ira_allocate (sizeof (IRA_INT_TYPE *)
					      * ira_objects_num);
  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    FOR_EACH_ALLOCNO_OBJECT (allocno, obj, aoi)
      {
	int id = OBJECT_CONFLICT_ID (obj);
	if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
	  {
	    conflicts[id] = NULL;
	    continue;
	  }
	conflict_bit_vec_words_num
	  = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	     / IRA_INT_BITS);
	allocated_words_num += conflict_bit_vec_words_num;
	conflicts[id]
	  = (IRA_INT_TYPE *) ira_allocate (sizeof (IRA_INT_TYPE)
					   * conflict_bit_vec_words_num);
	memset (conflicts[id], 0,
		sizeof (IRA_INT_TYPE) * conflict_bit_vec_words_num);
      }

  object_set_words = (ira_objects_num + IRA_INT_BITS - 1) / IRA_INT_BITS;
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    fprintf
      (ira_dump_file,
       "+++Allocating %ld bytes for conflict table (uncompressed size %ld)\n",
       (long) allocated_words_num * sizeof (IRA_INT_TYPE),
       (long) object_set_words * ira_objects_num * sizeof (IRA_INT_TYPE));

  objects_live = sparseset_alloc (ira_objects_num);
  for (i = 0; i < ira_max_point; i++)
    {
      for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	{
	  ira_object_t obj = r->object;
	  ira_allocno_t allocno = OBJECT_ALLOCNO (obj);
	  int id = OBJECT_CONFLICT_ID (obj);

	  gcc_assert (id < ira_objects_num);

	  aclass = ALLOCNO_CLASS (allocno);
	  EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
	    {
	      ira_object_t live_obj = ira_object_id_map[j];
	      ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
	      enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

	      if (ira_reg_classes_intersect_p[aclass][live_aclass]
		  /* Don't set up conflict for the allocno with itself.  */
		  && live_a != allocno)
		{
		  record_object_conflict (obj, live_obj);
		}
	    }
	  sparseset_set_bit (objects_live, id);
	}

      for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
    }
  sparseset_free (objects_live);
  return true;
}
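
/* A worked example of the size check above, assuming a 4-byte
   IRA_INT_TYPE and an ira-max-conflict-table-size of 1000 (MB): the
   first loop gives up as soon as the compressed bit vectors would need
   more than 1000 * 1024 * 1024 / 4 = 262,144,000 words, and IRA then
   runs without a conflict table.  */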
/* Return true iff allocnos A1 and A2 cannot be allocated to the same
   register due to conflicts.  */
static bool
allocnos_conflict_for_copy_p (ira_allocno_t a1, ira_allocno_t a2)
{
  /* Due to the fact that we canonicalize conflicts (see
     record_object_conflict), we only need to test for conflicts of
     the lowest order words.  */
  ira_object_t obj1 = ALLOCNO_OBJECT (a1, 0);
  ira_object_t obj2 = ALLOCNO_OBJECT (a2, 0);

  return OBJECTS_CONFLICT_P (obj1, obj2);
}
/* Check that X is REG or SUBREG of REG.  */
#define REG_SUBREG_P(x) \
  (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))))
/* Return X if X is a REG, otherwise it should be SUBREG of REG and
   the function returns the reg in this case.  *OFFSET will be set to
   0 in the first case or the regno offset in the second case.  */
static rtx
go_through_subreg (rtx x, int *offset)
{
  rtx reg;

  *offset = 0;
  if (REG_P (x))
    return x;
  ira_assert (GET_CODE (x) == SUBREG);
  reg = SUBREG_REG (x);
  ira_assert (REG_P (reg));
  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    *offset = subreg_regno_offset (REGNO (reg), GET_MODE (reg),
				   SUBREG_BYTE (x), GET_MODE (x));
  else if (!can_div_trunc_p (SUBREG_BYTE (x),
			     REGMODE_NATURAL_SIZE (GET_MODE (x)), offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner hard registers are being accessed.  */
    gcc_unreachable ();
  return reg;
}
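
/* For example, go_through_subreg ((subreg:SI (reg:DI 100) 4), &off),
   where 100 is a pseudo and REGMODE_NATURAL_SIZE (SImode) is assumed
   to be 4 bytes, returns (reg:DI 100) and sets off to 1, i.e. the
   access names the second word of the pseudo.  */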
/* Return the recomputed frequency for this shuffle copy or its similar
   case.  Since it is not for a real move insn, make it smaller.  */
static int
get_freq_for_shuffle_copy (int freq)
{
  return freq < 8 ? 1 : freq / 8;
}
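
/* E.g. a frequency of 4 yields 1 and a frequency of 80 yields 10, so
   shuffle copies weigh a factor of eight less than real move copies.  */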
/* Process registers REG1 and REG2 in move INSN with execution
   frequency FREQ.  The function also processes the registers in a
   potential move insn (INSN == NULL in this case) with frequency
   FREQ.  The function can modify hard register costs of the
   corresponding allocnos or create a copy involving the corresponding
   allocnos.  The function does nothing if both registers are hard
   registers.  When nothing is changed, the function returns FALSE.
   SINGLE_INPUT_OP_HAS_CSTR_P is only meaningful when CONSTRAINT_P is
   true; see function ira_get_dup_out_num for its meaning.  */
static bool
process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p, rtx_insn *insn,
		       int freq, bool single_input_op_has_cstr_p = true)
{
  int allocno_preferenced_hard_regno, index, offset1, offset2;
  int cost, conflict_cost, move_cost;
  bool only_regs_p;
  ira_allocno_t a;
  reg_class_t rclass, aclass;
  machine_mode mode;
  ira_copy_t cp;

  gcc_assert (REG_SUBREG_P (reg1) && REG_SUBREG_P (reg2));
  only_regs_p = REG_P (reg1) && REG_P (reg2);
  reg1 = go_through_subreg (reg1, &offset1);
  reg2 = go_through_subreg (reg2, &offset2);
  /* Set up hard regno preferenced by allocno.  If allocno gets the
     hard regno the copy (or potential move) insn will be removed.  */
  if (HARD_REGISTER_P (reg1))
    {
      if (HARD_REGISTER_P (reg2))
	return false;
      allocno_preferenced_hard_regno = REGNO (reg1) + offset1 - offset2;
      a = ira_curr_regno_allocno_map[REGNO (reg2)];
    }
  else if (HARD_REGISTER_P (reg2))
    {
      allocno_preferenced_hard_regno = REGNO (reg2) + offset2 - offset1;
      a = ira_curr_regno_allocno_map[REGNO (reg1)];
    }
  else
    {
      ira_allocno_t a1 = ira_curr_regno_allocno_map[REGNO (reg1)];
      ira_allocno_t a2 = ira_curr_regno_allocno_map[REGNO (reg2)];

      if (!allocnos_conflict_for_copy_p (a1, a2)
	  && offset1 == offset2
	  && ordered_p (GET_MODE_PRECISION (ALLOCNO_MODE (a1)),
			GET_MODE_PRECISION (ALLOCNO_MODE (a2))))
	{
	  cp = ira_add_allocno_copy (a1, a2, freq, constraint_p, insn,
				     ira_curr_loop_tree_node);
	  bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
	  return true;
	}
      else
	return false;
    }

  if (! IN_RANGE (allocno_preferenced_hard_regno,
		  0, FIRST_PSEUDO_REGISTER - 1))
    /* Cannot be tied.  */
    return false;
  rclass = REGNO_REG_CLASS (allocno_preferenced_hard_regno);
  mode = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  if (only_regs_p && insn != NULL_RTX
      && reg_class_size[rclass] <= ira_reg_class_max_nregs[rclass][mode])
    /* It is already taken into account in ira-costs.cc.  */
    return false;
  index = ira_class_hard_reg_index[aclass][allocno_preferenced_hard_regno];
  if (index < 0)
    /* Cannot be tied.  It is not in the allocno class.  */
    return false;
  ira_init_register_move_cost_if_necessary (mode);
  if (HARD_REGISTER_P (reg1))
    move_cost = ira_register_move_cost[mode][aclass][rclass];
  else
    move_cost = ira_register_move_cost[mode][rclass][aclass];

  if (!single_input_op_has_cstr_p)
    {
      /* When this is a constraint copy and the matching constraint
	 doesn't only exist for this given operand but also for some
	 other operand(s), saving the possible move cost does NOT
	 require reg1 and reg2 to use the same hardware register, so
	 this hardware preference isn't required to be fixed.  To avoid
	 over-preferring this hardware register, and over-disparaging
	 it on conflicting objects, we need some cost tweaking here,
	 similar to what we do for a shuffle copy.  */
      gcc_assert (constraint_p);
      int reduced_freq = get_freq_for_shuffle_copy (freq);
      if (HARD_REGISTER_P (reg1))
	/* For reg2 = opcode(reg1, reg3 ...), assume that reg3 is a
	   pseudo register which has a matching constraint on reg2.
	   Even if reg2 isn't assigned reg1, it's still possible not
	   to have register moves if reg2 and reg3 use the same
	   hardware register.  So to avoid the allocation
	   over-preferring reg1, we can just treat it as a shuffle
	   copy.  */
	cost = conflict_cost = move_cost * reduced_freq;
      else
	{
	  /* For reg1 = opcode(reg2, reg3 ...), assume that reg3 is a
	     pseudo register which has a matching constraint on reg2.
	     To save the register move, it's better to assign reg1 to
	     either of reg2 and reg3 (or one of other pseudos like
	     reg3), so it's reasonable to use freq for the cost.  But
	     for conflict_cost, since reg2 and reg3 conflict with each
	     other, both of them have a chance to be assigned reg1.
	     Assuming reg3 has one copy which also conflicts with
	     reg2, we shouldn't make reg2 less preferred on reg1 since
	     reg3 has the same chance to be assigned reg1.  So adjust
	     the conflict_cost to be the same as what we use for a
	     shuffle copy.  */
	  cost = move_cost * freq;
	  conflict_cost = move_cost * reduced_freq;
	}
    }
  else
    cost = conflict_cost = move_cost * freq;

  do
    {
      ira_allocate_and_set_costs
	(&ALLOCNO_HARD_REG_COSTS (a), aclass,
	 ALLOCNO_CLASS_COST (a));
      ira_allocate_and_set_costs
	(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass, 0);
      ALLOCNO_HARD_REG_COSTS (a)[index] -= cost;
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= conflict_cost;
      if (ALLOCNO_HARD_REG_COSTS (a)[index] < ALLOCNO_CLASS_COST (a))
	ALLOCNO_CLASS_COST (a) = ALLOCNO_HARD_REG_COSTS (a)[index];
      ira_add_allocno_pref (a, allocno_preferenced_hard_regno, freq);
      a = ira_parent_or_cap_allocno (a);
    }
  while (a != NULL);
  return true;
}
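
/* To illustrate the updates above in the common case
   (SINGLE_INPUT_OP_HAS_CSTR_P): for a move between a pseudo and a hard
   register with frequency FREQ, both cost and conflict_cost are
   move_cost * FREQ; the final loop subtracts them from the preferred
   register's entries in ALLOCNO_HARD_REG_COSTS and
   ALLOCNO_CONFLICT_HARD_REG_COSTS, records the preference with
   ira_add_allocno_pref, and repeats this for every parent or cap
   allocno up the loop tree.  */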
/* Return true if output operand OUTPUT and input operand INPUT of
   INSN can use the same register class for at least one alternative.
   INSN is already described in recog_data and recog_op_alt.  */
static bool
can_use_same_reg_p (rtx_insn *insn, int output, int input)
{
  alternative_mask preferred = get_preferred_alternatives (insn);
  for (int nalt = 0; nalt < recog_data.n_alternatives; nalt++)
    {
      if (!TEST_BIT (preferred, nalt))
	continue;

      const operand_alternative *op_alt
	= &recog_op_alt[nalt * recog_data.n_operands];
      if (op_alt[input].matches == output)
	return true;

      if (op_alt[output].earlyclobber)
	continue;

      if (ira_reg_class_intersect[op_alt[input].cl][op_alt[output].cl]
	  != NO_REGS)
	return true;
    }
  return false;
}
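
/* For example, with an output operand constrained as "=r,r" and an
   input operand constrained as "0,r", the input matches the output in
   the first alternative, so the matches check above succeeds
   immediately; otherwise the operands may still share a register when
   their alternative classes intersect and the output is not an
   earlyclobber.  */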
/* Process all of the output registers of the current insn (INSN) which
   are not bound (BOUND_P) and the input register REG (its operand number
   OP_NUM) which dies in the insn as if there were a move insn between
   them with frequency FREQ.  */
static void
process_reg_shuffles (rtx_insn *insn, rtx reg, int op_num, int freq,
		      bool *bound_p)
{
  int i;
  rtx another_reg;

  gcc_assert (REG_SUBREG_P (reg));
  for (i = 0; i < recog_data.n_operands; i++)
    {
      another_reg = recog_data.operand[i];

      if (!REG_SUBREG_P (another_reg) || op_num == i
	  || recog_data.operand_type[i] != OP_OUT
	  || bound_p[i]
	  || (!can_use_same_reg_p (insn, i, op_num)
	      && (recog_data.constraints[op_num][0] != '%'
		  || !can_use_same_reg_p (insn, i, op_num + 1))
	      && (op_num == 0
		  || recog_data.constraints[op_num - 1][0] != '%'
		  || !can_use_same_reg_p (insn, i, op_num - 1))))
	continue;

      process_regs_for_copy (reg, another_reg, false, NULL, freq);
    }
}
/* Process INSN and create allocno copies if necessary.  For example,
   it might be because INSN is a pseudo-register move or INSN is a
   two-operand insn.  */
static void
add_insn_allocno_copies (rtx_insn *insn)
{
  rtx set, operand, dup;
  bool bound_p[MAX_RECOG_OPERANDS];
  int i, n, freq;
  alternative_mask alts;

  freq = REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn));
  if (freq == 0)
    freq = 1;
  if ((set = single_set (insn)) != NULL_RTX
      && REG_SUBREG_P (SET_DEST (set)) && REG_SUBREG_P (SET_SRC (set))
      && ! side_effects_p (set)
      && find_reg_note (insn, REG_DEAD,
			REG_P (SET_SRC (set))
			? SET_SRC (set)
			: SUBREG_REG (SET_SRC (set))) != NULL_RTX)
    {
      process_regs_for_copy (SET_SRC (set), SET_DEST (set),
			     false, insn, freq);
      return;
    }

  /* Fast check of possibility of constraint or shuffle copies.  If
     there are no dead registers, there will be no such copies.  */
  if (! find_reg_note (insn, REG_DEAD, NULL_RTX))
    return;
  alts = ira_setup_alts (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    bound_p[i] = false;
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (! REG_SUBREG_P (operand))
	continue;
      bool single_input_op_has_cstr_p;
      if ((n = ira_get_dup_out_num (i, alts, single_input_op_has_cstr_p)) >= 0)
	{
	  bound_p[n] = true;
	  dup = recog_data.operand[n];
	  if (REG_SUBREG_P (dup)
	      && find_reg_note (insn, REG_DEAD,
				REG_P (operand)
				? operand
				: SUBREG_REG (operand)) != NULL_RTX)
	    process_regs_for_copy (operand, dup, true, NULL, freq,
				   single_input_op_has_cstr_p);
	}
    }
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (REG_SUBREG_P (operand)
	  && find_reg_note (insn, REG_DEAD,
			    REG_P (operand)
			    ? operand : SUBREG_REG (operand)) != NULL_RTX)
	/* If an operand dies, prefer its hard register for the output
	   operands by decreasing the hard register cost or creating
	   the corresponding allocno copies.  The cost will not
	   correspond to a real move insn cost, so make the frequency
	   smaller.  */
	{
	  int new_freq = get_freq_for_shuffle_copy (freq);

	  process_reg_shuffles (insn, operand, i, new_freq, bound_p);
	}
    }
}
/* Add copies originated from BB given by LOOP_TREE_NODE.  */
static void
add_copies (ira_loop_tree_node_t loop_tree_node)
{
  basic_block bb;
  rtx_insn *insn;

  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      add_insn_allocno_copies (insn);
}
/* Propagate copies to the corresponding allocnos on the upper loop
   tree level.  */
static void
propagate_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;
  ira_allocno_t a1, a2, parent_a1, parent_a2;

  FOR_EACH_COPY (cp, ci)
    {
      a1 = cp->first;
      a2 = cp->second;
      if (ALLOCNO_LOOP_TREE_NODE (a1) == ira_loop_tree_root)
	continue;
      ira_assert ((ALLOCNO_LOOP_TREE_NODE (a2) != ira_loop_tree_root));
      parent_a1 = ira_parent_or_cap_allocno (a1);
      parent_a2 = ira_parent_or_cap_allocno (a2);
      ira_assert (parent_a1 != NULL && parent_a2 != NULL);
      if (! allocnos_conflict_for_copy_p (parent_a1, parent_a2))
	ira_add_allocno_copy (parent_a1, parent_a2, cp->freq,
			      cp->constraint_p, cp->insn,
			      cp->loop_tree_node);
    }
}
/* Array used to collect all conflict allocnos for given allocno.  */
static ira_object_t *collected_conflict_objects;

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) for object OBJ from the conflict table.  */
static void
build_object_conflicts (ira_object_t obj)
{
  int i, px, parent_num;
  ira_allocno_t parent_a, another_parent_a;
  ira_object_t parent_obj;
  ira_allocno_t a = OBJECT_ALLOCNO (obj);
  IRA_INT_TYPE *object_conflicts;
  minmax_set_iterator asi;
  int parent_min, parent_max ATTRIBUTE_UNUSED;

  object_conflicts = conflicts[OBJECT_CONFLICT_ID (obj)];
  px = 0;
  FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
			      OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
    {
      ira_object_t another_obj = ira_object_id_map[i];
      ira_allocno_t another_a = OBJECT_ALLOCNO (another_obj);

      ira_assert (ira_reg_classes_intersect_p
		  [ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);
      collected_conflict_objects[px++] = another_obj;
    }
  if (ira_conflict_vector_profitable_p (obj, px))
    {
      ira_object_t *vec;
      ira_allocate_conflict_vec (obj, px);
      vec = OBJECT_CONFLICT_VEC (obj);
      memcpy (vec, collected_conflict_objects, sizeof (ira_object_t) * px);
      vec[px] = NULL;
      OBJECT_NUM_CONFLICTS (obj) = px;
    }
  else
    {
      int conflict_bit_vec_words_num;

      OBJECT_CONFLICT_ARRAY (obj) = object_conflicts;
      if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
	conflict_bit_vec_words_num = 0;
      else
	conflict_bit_vec_words_num
	  = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	     / IRA_INT_BITS);
      OBJECT_CONFLICT_ARRAY_SIZE (obj)
	= conflict_bit_vec_words_num * sizeof (IRA_INT_TYPE);
    }

  parent_a = ira_parent_or_cap_allocno (a);
  if (parent_a == NULL)
    return;
  ira_assert (ALLOCNO_CLASS (a) == ALLOCNO_CLASS (parent_a));
  ira_assert (ALLOCNO_NUM_OBJECTS (a) == ALLOCNO_NUM_OBJECTS (parent_a));
  parent_obj = ALLOCNO_OBJECT (parent_a, OBJECT_SUBWORD (obj));
  parent_num = OBJECT_CONFLICT_ID (parent_obj);
  parent_min = OBJECT_MIN (parent_obj);
  parent_max = OBJECT_MAX (parent_obj);
  FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
			      OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
    {
      ira_object_t another_obj = ira_object_id_map[i];
      ira_allocno_t another_a = OBJECT_ALLOCNO (another_obj);
      int another_word = OBJECT_SUBWORD (another_obj);

      ira_assert (ira_reg_classes_intersect_p
		  [ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);

      another_parent_a = ira_parent_or_cap_allocno (another_a);
      if (another_parent_a == NULL)
	continue;
      ira_assert (ALLOCNO_NUM (another_parent_a) >= 0);
      ira_assert (ALLOCNO_CLASS (another_a)
		  == ALLOCNO_CLASS (another_parent_a));
      ira_assert (ALLOCNO_NUM_OBJECTS (another_a)
		  == ALLOCNO_NUM_OBJECTS (another_parent_a));
      SET_MINMAX_SET_BIT (conflicts[parent_num],
			  OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (another_parent_a,
							      another_word)),
			  parent_min, parent_max);
    }
}
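
/* The choice above is a space/time trade-off: a short conflict list is
   stored as a plain vector of ira_object_t, which is cheap to iterate,
   while a dense one keeps the min/max compressed bit vector already
   built in `conflicts'; such reused rows are not freed in
   ira_build_conflicts.  */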
/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) of all allocnos from the conflict table.  */
static void
build_conflicts (void)
{
  int i;
  ira_allocno_t a, cap;

  collected_conflict_objects
    = (ira_object_t *) ira_allocate (sizeof (ira_object_t)
				     * ira_objects_num);
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j, nregs = ALLOCNO_NUM_OBJECTS (a);
	for (j = 0; j < nregs; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    build_object_conflicts (obj);
	    for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
	      {
		ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		gcc_assert (ALLOCNO_NUM_OBJECTS (cap)
			    == ALLOCNO_NUM_OBJECTS (a));
		build_object_conflicts (cap_obj);
	      }
	  }
      }
  ira_free (collected_conflict_objects);
}
/* Print hard reg set SET with TITLE to FILE.  */
static void
print_hard_reg_set (FILE *file, const char *title, HARD_REG_SET set)
{
  int i, start, end;

  fputs (title, file);
  for (start = end = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      bool reg_included = TEST_HARD_REG_BIT (set, i);

      if (reg_included)
	{
	  if (start == -1)
	    start = i;
	  end = i;
	}
      if (start >= 0 && (!reg_included || i == FIRST_PSEUDO_REGISTER - 1))
	{
	  if (start == end)
	    fprintf (file, " %d", start);
	  else if (start == end + 1)
	    fprintf (file, " %d %d", start, end);
	  else
	    fprintf (file, " %d-%d", start, end);
	  start = -1;
	}
    }
  putc ('\n', file);
}
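
/* E.g. a set containing hard registers 1, 2, 3 and 7 is printed as
   " 1-3 7": maximal runs of consecutive registers are folded into a
   START-END range and isolated registers are printed on their own.  */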
/* Print information about conflicts of allocno A (only of its regno if
   REG_P) to FILE.  */
static void
print_allocno_conflicts (FILE *file, bool reg_p, ira_allocno_t a)
{
  HARD_REG_SET conflicting_hard_regs;
  basic_block bb;
  int n, i;

  if (reg_p)
    fprintf (file, ";; r%d", ALLOCNO_REGNO (a));
  else
    {
      fprintf (file, ";; a%d(r%d,", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
      if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
	fprintf (file, "b%d", bb->index);
      else
	fprintf (file, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
      putc (')', file);
    }

  fputs (" conflicts:", file);
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      if (OBJECT_CONFLICT_ARRAY (obj) == NULL)
	{
	  fprintf (file, "\n;; total conflict hard regs:\n");
	  fprintf (file, ";; conflict hard regs:\n\n");
	  continue;
	}

      if (n > 1)
	fprintf (file, "\n;; subobject %d:", i);
      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	  if (reg_p)
	    fprintf (file, " r%d,", ALLOCNO_REGNO (conflict_a));
	  else
	    {
	      fprintf (file, " a%d(r%d", ALLOCNO_NUM (conflict_a),
		       ALLOCNO_REGNO (conflict_a));
	      if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
		fprintf (file, ",w%d", OBJECT_SUBWORD (conflict_obj));
	      if ((bb = ALLOCNO_LOOP_TREE_NODE (conflict_a)->bb) != NULL)
		fprintf (file, ",b%d", bb->index);
	      else
		fprintf (file, ",l%d",
			 ALLOCNO_LOOP_TREE_NODE (conflict_a)->loop_num);
	      putc (')', file);
	    }
	}
      conflicting_hard_regs = (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
			       & ~ira_no_alloc_regs
			       & reg_class_contents[ALLOCNO_CLASS (a)]);
      print_hard_reg_set (file, "\n;; total conflict hard regs:",
			  conflicting_hard_regs);

      conflicting_hard_regs = (OBJECT_CONFLICT_HARD_REGS (obj)
			       & ~ira_no_alloc_regs
			       & reg_class_contents[ALLOCNO_CLASS (a)]);
      print_hard_reg_set (file, ";; conflict hard regs:",
			  conflicting_hard_regs);
      putc ('\n', file);
    }
}
/* Print information about allocno or only regno (if REG_P) conflicts
   to FILE.  */
static void
print_conflicts (FILE *file, bool reg_p)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    print_allocno_conflicts (file, reg_p, a);
  putc ('\n', file);
}

/* Print information about allocno or only regno (if REG_P) conflicts
   to stderr.  */
void
ira_debug_conflicts (bool reg_p)
{
  print_conflicts (stderr, reg_p);
}
/* Entry function which builds allocno conflicts and allocno copies
   and accumulates some allocno info on upper level regions.  */
void
ira_build_conflicts (void)
{
  enum reg_class base;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  HARD_REG_SET temp_hard_reg_set;

  if (ira_conflicts_p)
    {
      ira_conflicts_p = build_conflict_bit_table ();
      if (ira_conflicts_p)
	{
	  ira_object_t obj;
	  ira_object_iterator oi;

	  build_conflicts ();
	  ira_traverse_loop_tree (true, ira_loop_tree_root, add_copies, NULL);
	  /* We need finished conflict table for the subsequent call.  */
	  if (flag_ira_region == IRA_REGION_ALL
	      || flag_ira_region == IRA_REGION_MIXED)
	    propagate_copies ();

	  /* Now we can free memory for the conflict table (see function
	     build_object_conflicts for details).  */
	  FOR_EACH_OBJECT (obj, oi)
	    {
	      if (OBJECT_CONFLICT_ARRAY (obj)
		  != conflicts[OBJECT_CONFLICT_ID (obj)])
		ira_free (conflicts[OBJECT_CONFLICT_ID (obj)]);
	    }
	  ira_free (conflicts);
	}
    }
  base = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, ADDRESS, SCRATCH);
  if (! targetm.class_likely_spilled_p (base))
    CLEAR_HARD_REG_SET (temp_hard_reg_set);
  else
    temp_hard_reg_set = reg_class_contents[base] & ~ira_no_alloc_regs;
  FOR_EACH_ALLOCNO (a, ai)
    {
      int i, n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  rtx allocno_reg = regno_reg_rtx[ALLOCNO_REGNO (a)];

	  /* For debugging purposes don't put user defined variables in
	     callee-clobbered registers.  However, do allow parameters
	     in callee-clobbered registers to improve debugging.  This
	     is a bit of a fragile hack.  */
	  if (optimize == 0
	      && REG_USERVAR_P (allocno_reg)
	      && ! reg_is_parm_p (allocno_reg))
	    {
	      HARD_REG_SET new_conflict_regs = crtl->abi->full_reg_clobbers ();
	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	      OBJECT_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	    }

	  if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	    {
	      HARD_REG_SET new_conflict_regs = ira_need_caller_save_regs (a);
	      if (flag_caller_saves)
		new_conflict_regs &= (~savable_regs | temp_hard_reg_set);
	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	      OBJECT_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	    }

	  /* Now we deal with paradoxical subreg cases where certain registers
	     cannot be accessed in the widest mode.  */
	  machine_mode outer_mode = ALLOCNO_WMODE (a);
	  machine_mode inner_mode = ALLOCNO_MODE (a);
	  if (paradoxical_subreg_p (outer_mode, inner_mode))
	    {
	      enum reg_class aclass = ALLOCNO_CLASS (a);
	      for (int j = ira_class_hard_regs_num[aclass] - 1; j >= 0; --j)
		{
		  int inner_regno = ira_class_hard_regs[aclass][j];
		  int outer_regno = simplify_subreg_regno (inner_regno,
							   inner_mode, 0,
							   outer_mode);
		  if (outer_regno < 0
		      || !in_hard_reg_set_p (reg_class_contents[aclass],
					     outer_mode, outer_regno))
		    {
		      SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
					inner_regno);
		      SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj),
					inner_regno);
		    }
		}
	    }
	}
    }
  if (optimize && ira_conflicts_p
      && internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_conflicts (ira_dump_file, false);
}