1 /* IRA conflict builder.
2 Copyright (C) 2006-2021 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
30 #include "insn-config.h"
34 #include "sparseset.h"
35 #include "addresses.h"
37 /* This file contains code responsible for allocno conflict creation,
38 allocno copy creation and allocno info accumulation on upper level
41 /* ira_allocnos_num array of arrays of bits, recording whether two
42 allocno's conflict (can't go in the same hardware register).
44 Some arrays will be used as conflict bit vector of the
45 corresponding allocnos see function build_object_conflicts. */
/* Conflict table: indexed by object conflict id, each element points to
   that object's min/max-bounded conflict bit vector.  Some rows are later
   reused directly as the object's own conflict array (see
   build_object_conflicts).  */
46 static IRA_INT_TYPE
**conflicts
;
48 /* Macro to test a conflict of C1 and C2 in `conflicts'. */
/* C1's bit vector only covers conflict ids in the inclusive range
   [OBJECT_MIN (C1), OBJECT_MAX (C1)], so both range checks must pass
   before the bit test is applied.  */
49 #define OBJECTS_CONFLICT_P(C1, C2) \
50 (OBJECT_MIN (C1) <= OBJECT_CONFLICT_ID (C2) \
51 && OBJECT_CONFLICT_ID (C2) <= OBJECT_MAX (C1) \
52 && TEST_MINMAX_SET_BIT (conflicts[OBJECT_CONFLICT_ID (C1)], \
53 OBJECT_CONFLICT_ID (C2), \
54 OBJECT_MIN (C1), OBJECT_MAX (C1)))
57 /* Record a conflict between objects OBJ1 and OBJ2. If necessary,
58 canonicalize the conflict by recording it for lower-order subobjects
59 of the corresponding allocnos. */
/* NOTE(review): this region is line-mangled.  The function header
   ("static void"), the braces, the declarations of id1/id2, and the
   trailing OBJECT_MAX (...) arguments of the two SET_MINMAX_SET_BIT
   calls are missing fragments — recover them from upstream before
   compiling.  */
61 record_object_conflict (ira_object_t obj1
, ira_object_t obj2
)
63 ira_allocno_t a1
= OBJECT_ALLOCNO (obj1
);
64 ira_allocno_t a2
= OBJECT_ALLOCNO (obj2
);
65 int w1
= OBJECT_SUBWORD (obj1
);
66 int w2
= OBJECT_SUBWORD (obj2
);
69 /* Canonicalize the conflict. If two identically-numbered words
70 conflict, always record this as a conflict between words 0. That
71 is the only information we need, and it is easier to test for if
72 it is collected in each allocno's lowest-order object. */
73 if (w1
== w2
&& w1
> 0)
75 obj1
= ALLOCNO_OBJECT (a1
, 0);
76 obj2
= ALLOCNO_OBJECT (a2
, 0);
/* Conflict ids index rows of the `conflicts' table.  */
78 id1
= OBJECT_CONFLICT_ID (obj1
);
79 id2
= OBJECT_CONFLICT_ID (obj2
);
/* Record the conflict symmetrically: once in OBJ1's row and once in
   OBJ2's row.  */
81 SET_MINMAX_SET_BIT (conflicts
[id1
], id2
, OBJECT_MIN (obj1
),
83 SET_MINMAX_SET_BIT (conflicts
[id2
], id1
, OBJECT_MIN (obj2
),
87 /* Build allocno conflict table by processing allocno live ranges.
88 Return true if the table was built. The table is not built if it
/* NOTE(review): line-mangled region — the "static bool" header, braces,
   several statement tails (e.g. the "/ IRA_INT_BITS" divisor lines,
   the fprintf stream arguments, the "return false;"/"return true;"
   statements) are missing fragments; recover from upstream.  */
91 build_conflict_bit_table (void)
95 enum reg_class aclass
;
96 int object_set_words
, allocated_words_num
, conflict_bit_vec_words_num
;
98 ira_allocno_t allocno
;
99 ira_allocno_iterator ai
;
100 sparseset objects_live
;
102 ira_allocno_object_iterator aoi
;
/* First pass: total the bit-vector words needed so an over-large table
   can be refused before allocating anything.  */
104 allocated_words_num
= 0;
105 FOR_EACH_ALLOCNO (allocno
, ai
)
106 FOR_EACH_ALLOCNO_OBJECT (allocno
, obj
, aoi
)
108 if (OBJECT_MAX (obj
) < OBJECT_MIN (obj
))
110 conflict_bit_vec_words_num
111 = ((OBJECT_MAX (obj
) - OBJECT_MIN (obj
) + IRA_INT_BITS
)
113 allocated_words_num
+= conflict_bit_vec_words_num
;
/* Refuse to build the table if it would exceed the parameter-imposed
   cap (param_ira_max_conflict_table_size is in megabytes).  */
114 if ((uint64_t) allocated_words_num
* sizeof (IRA_INT_TYPE
)
115 > (uint64_t) param_ira_max_conflict_table_size
* 1024 * 1024)
117 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
120 "+++Conflict table will be too big(>%dMB) -- don't use it\n",
121 param_ira_max_conflict_table_size
);
/* Second pass: allocate and zero one bit vector per object; objects
   with an empty id range (MAX < MIN) get a NULL row.  */
126 conflicts
= (IRA_INT_TYPE
**) ira_allocate (sizeof (IRA_INT_TYPE
*)
128 allocated_words_num
= 0;
129 FOR_EACH_ALLOCNO (allocno
, ai
)
130 FOR_EACH_ALLOCNO_OBJECT (allocno
, obj
, aoi
)
132 int id
= OBJECT_CONFLICT_ID (obj
);
133 if (OBJECT_MAX (obj
) < OBJECT_MIN (obj
))
135 conflicts
[id
] = NULL
;
138 conflict_bit_vec_words_num
139 = ((OBJECT_MAX (obj
) - OBJECT_MIN (obj
) + IRA_INT_BITS
)
141 allocated_words_num
+= conflict_bit_vec_words_num
;
143 = (IRA_INT_TYPE
*) ira_allocate (sizeof (IRA_INT_TYPE
)
144 * conflict_bit_vec_words_num
);
145 memset (conflicts
[id
], 0,
146 sizeof (IRA_INT_TYPE
) * conflict_bit_vec_words_num
);
149 object_set_words
= (ira_objects_num
+ IRA_INT_BITS
- 1) / IRA_INT_BITS
;
150 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
153 "+++Allocating %ld bytes for conflict table (uncompressed size %ld)\n",
154 (long) allocated_words_num
* sizeof (IRA_INT_TYPE
),
155 (long) object_set_words
* ira_objects_num
* sizeof (IRA_INT_TYPE
));
/* Sweep program points in order.  At each point, an object whose live
   range starts conflicts with everything currently live, then joins the
   live set itself.  */
157 objects_live
= sparseset_alloc (ira_objects_num
);
158 for (i
= 0; i
< ira_max_point
; i
++)
160 for (r
= ira_start_point_ranges
[i
]; r
!= NULL
; r
= r
->start_next
)
162 ira_object_t obj
= r
->object
;
163 ira_allocno_t allocno
= OBJECT_ALLOCNO (obj
);
164 int id
= OBJECT_CONFLICT_ID (obj
);
166 gcc_assert (id
< ira_objects_num
);
168 aclass
= ALLOCNO_CLASS (allocno
);
169 EXECUTE_IF_SET_IN_SPARSESET (objects_live
, j
)
171 ira_object_t live_obj
= ira_object_id_map
[j
];
172 ira_allocno_t live_a
= OBJECT_ALLOCNO (live_obj
);
173 enum reg_class live_aclass
= ALLOCNO_CLASS (live_a
);
/* Only classes that can share hard registers can conflict.  */
175 if (ira_reg_classes_intersect_p
[aclass
][live_aclass
]
176 /* Don't set up conflict for the allocno with itself. */
177 && live_a
!= allocno
)
179 record_object_conflict (obj
, live_obj
);
182 sparseset_set_bit (objects_live
, id
);
/* Ranges finishing at this point leave the live set.  */
185 for (r
= ira_finish_point_ranges
[i
]; r
!= NULL
; r
= r
->finish_next
)
186 sparseset_clear_bit (objects_live
, OBJECT_CONFLICT_ID (r
->object
));
188 sparseset_free (objects_live
);
192 /* Return true iff allocnos A1 and A2 cannot be allocated to the same
193 register due to conflicts. */
/* NOTE(review): the "static bool" header line and braces are missing
   fragments in this mangled region.  */
196 allocnos_conflict_for_copy_p (ira_allocno_t a1
, ira_allocno_t a2
)
198 /* Due to the fact that we canonicalize conflicts (see
199 record_object_conflict), we only need to test for conflicts of
200 the lowest order words. */
201 ira_object_t obj1
= ALLOCNO_OBJECT (a1
, 0);
202 ira_object_t obj2
= ALLOCNO_OBJECT (a2
, 0);
204 return OBJECTS_CONFLICT_P (obj1
, obj2
);
207 /* Check that X is REG or SUBREG of REG. */
/* Used to validate operands before go_through_subreg and copy
   processing (see process_regs_for_copy and process_reg_shuffles).  */
208 #define REG_SUBREG_P(x) \
209 (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))))
211 /* Return X if X is a REG, otherwise it should be SUBREG of REG and
212 the function returns the reg in this case. *OFFSET will be set to
213 0 in the first case or the regno offset in the second case. */
/* NOTE(review): mangled region — the function header, the early
   "if (REG_P (x)) ..." return path, and the final return statement are
   missing fragments; recover from upstream.  */
215 go_through_subreg (rtx x
, int *offset
)
222 ira_assert (GET_CODE (x
) == SUBREG
);
223 reg
= SUBREG_REG (x
);
224 ira_assert (REG_P (reg
));
/* For a hard register the offset can be computed exactly from the
   subreg byte and modes; for a pseudo it must divide evenly by the
   natural register-mode size.  */
225 if (REGNO (reg
) < FIRST_PSEUDO_REGISTER
)
226 *offset
= subreg_regno_offset (REGNO (reg
), GET_MODE (reg
),
227 SUBREG_BYTE (x
), GET_MODE (x
));
228 else if (!can_div_trunc_p (SUBREG_BYTE (x
),
229 REGMODE_NATURAL_SIZE (GET_MODE (x
)), offset
))
230 /* Checked by validate_subreg. We must know at compile time which
231 inner hard registers are being accessed. */
236 /* Process registers REG1 and REG2 in move INSN with execution
237 frequency FREQ. The function also processes the registers in a
238 potential move insn (INSN == NULL in this case) with frequency
239 FREQ. The function can modify hard register costs of the
240 corresponding allocnos or create a copy involving the corresponding
241 allocnos. The function does nothing if the both registers are hard
242 registers. When nothing is changed, the function returns
/* NOTE(review): mangled region — the "static bool" header, braces,
   several declarations (a, mode, only_regs_p, cp) and various return
   statements are missing fragments; recover from upstream.  */
245 process_regs_for_copy (rtx reg1
, rtx reg2
, bool constraint_p
,
246 rtx_insn
*insn
, int freq
)
248 int allocno_preferenced_hard_regno
, cost
, index
, offset1
, offset2
;
251 reg_class_t rclass
, aclass
;
255 gcc_assert (REG_SUBREG_P (reg1
) && REG_SUBREG_P (reg2
));
256 only_regs_p
= REG_P (reg1
) && REG_P (reg2
);
/* Strip any SUBREG wrappers, capturing the regno offsets.  */
257 reg1
= go_through_subreg (reg1
, &offset1
);
258 reg2
= go_through_subreg (reg2
, &offset2
);
259 /* Set up hard regno preferenced by allocno. If allocno gets the
260 hard regno the copy (or potential move) insn will be removed. */
261 if (HARD_REGISTER_P (reg1
))
263 if (HARD_REGISTER_P (reg2
))
265 allocno_preferenced_hard_regno
= REGNO (reg1
) + offset1
- offset2
;
266 a
= ira_curr_regno_allocno_map
[REGNO (reg2
)];
268 else if (HARD_REGISTER_P (reg2
))
270 allocno_preferenced_hard_regno
= REGNO (reg2
) + offset2
- offset1
;
271 a
= ira_curr_regno_allocno_map
[REGNO (reg1
)];
/* Both operands are pseudos: try to tie them with an allocno copy
   instead of a hard-register preference.  */
275 ira_allocno_t a1
= ira_curr_regno_allocno_map
[REGNO (reg1
)];
276 ira_allocno_t a2
= ira_curr_regno_allocno_map
[REGNO (reg2
)];
278 if (!allocnos_conflict_for_copy_p (a1
, a2
)
279 && offset1
== offset2
280 && ordered_p (GET_MODE_PRECISION (ALLOCNO_MODE (a1
)),
281 GET_MODE_PRECISION (ALLOCNO_MODE (a2
))))
283 cp
= ira_add_allocno_copy (a1
, a2
, freq
, constraint_p
, insn
,
284 ira_curr_loop_tree_node
);
285 bitmap_set_bit (ira_curr_loop_tree_node
->local_copies
, cp
->num
);
/* From here on exactly one side is a hard register and A is the
   pseudo's allocno.  */
292 if (! IN_RANGE (allocno_preferenced_hard_regno
,
293 0, FIRST_PSEUDO_REGISTER
- 1))
294 /* Cannot be tied. */
296 rclass
= REGNO_REG_CLASS (allocno_preferenced_hard_regno
);
297 mode
= ALLOCNO_MODE (a
);
298 aclass
= ALLOCNO_CLASS (a
);
299 if (only_regs_p
&& insn
!= NULL_RTX
300 && reg_class_size
[rclass
] <= ira_reg_class_max_nregs
[rclass
][mode
])
301 /* It is already taken into account in ira-costs.c. */
303 index
= ira_class_hard_reg_index
[aclass
][allocno_preferenced_hard_regno
];
305 /* Cannot be tied. It is not in the allocno class. */
307 ira_init_register_move_cost_if_necessary (mode
);
/* The move cost direction depends on which side is the hard
   register.  */
308 if (HARD_REGISTER_P (reg1
))
309 cost
= ira_register_move_cost
[mode
][aclass
][rclass
] * freq
;
311 cost
= ira_register_move_cost
[mode
][rclass
][aclass
] * freq
;
/* Bias the preferred hard register by lowering its cost entries; keep
   the class cost consistent with the cheapest hard register.  */
314 ira_allocate_and_set_costs
315 (&ALLOCNO_HARD_REG_COSTS (a
), aclass
,
316 ALLOCNO_CLASS_COST (a
));
317 ira_allocate_and_set_costs
318 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a
), aclass
, 0);
319 ALLOCNO_HARD_REG_COSTS (a
)[index
] -= cost
;
320 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
)[index
] -= cost
;
321 if (ALLOCNO_HARD_REG_COSTS (a
)[index
] < ALLOCNO_CLASS_COST (a
))
322 ALLOCNO_CLASS_COST (a
) = ALLOCNO_HARD_REG_COSTS (a
)[index
];
323 ira_add_allocno_pref (a
, allocno_preferenced_hard_regno
, freq
);
/* NOTE(review): upstream iterates this whole cost/pref update over the
   allocno's ancestors; the loop header fragment is missing here —
   confirm against upstream.  */
324 a
= ira_parent_or_cap_allocno (a
);
330 /* Return true if output operand OUTPUT and input operand INPUT of
331 INSN can use the same register class for at least one alternative.
332 INSN is already described in recog_data and recog_op_alt. */
/* NOTE(review): mangled region — the "static bool" header, braces, the
   "return true;"/"return false;" statements and the "!= NO_REGS"
   continuation of the last condition are missing fragments.  */
334 can_use_same_reg_p (rtx_insn
*insn
, int output
, int input
)
336 alternative_mask preferred
= get_preferred_alternatives (insn
);
337 for (int nalt
= 0; nalt
< recog_data
.n_alternatives
; nalt
++)
/* Skip alternatives that are not preferred for this insn.  */
339 if (!TEST_BIT (preferred
, nalt
))
342 const operand_alternative
*op_alt
343 = &recog_op_alt
[nalt
* recog_data
.n_operands
];
/* An input that directly matches the output can share its register.  */
344 if (op_alt
[input
].matches
== output
)
/* Otherwise the two operands' classes must intersect.  */
347 if (ira_reg_class_intersect
[op_alt
[input
].cl
][op_alt
[output
].cl
]
354 /* Process all of the output registers of the current insn (INSN) which
355 are not bound (BOUND_P) and the input register REG (its operand number
356 OP_NUM) which dies in the insn as if there were a move insn between
357 them with frequency FREQ. */
/* NOTE(review): mangled region — the "static void" header, the BOUND_P
   parameter fragment, braces, and parts of the skip condition
   (bound_p[i] test, "op_num == 0" guard, "continue;") are missing;
   recover from upstream.  */
359 process_reg_shuffles (rtx_insn
*insn
, rtx reg
, int op_num
, int freq
,
365 gcc_assert (REG_SUBREG_P (reg
));
366 for (i
= 0; i
< recog_data
.n_operands
; i
++)
368 another_reg
= recog_data
.operand
[i
];
/* Skip operands that cannot form a shuffle copy with REG: non-regs,
   REG itself, non-outputs, and outputs whose alternatives cannot share
   a register with operand OP_NUM (also trying the '%'-commutative
   neighbour operands).  */
370 if (!REG_SUBREG_P (another_reg
) || op_num
== i
371 || recog_data
.operand_type
[i
] != OP_OUT
373 || (!can_use_same_reg_p (insn
, i
, op_num
)
374 && (recog_data
.constraints
[op_num
][0] != '%'
375 || !can_use_same_reg_p (insn
, i
, op_num
+ 1))
377 || recog_data
.constraints
[op_num
- 1][0] != '%'
378 || !can_use_same_reg_p (insn
, i
, op_num
- 1))))
/* Treat the pair as a potential move (INSN == NULL form).  */
381 process_regs_for_copy (reg
, another_reg
, false, NULL
, freq
);
385 /* Process INSN and create allocno copies if necessary. For example,
386 it might be because INSN is a pseudo-register move or INSN is two
/* NOTE(review): mangled region — the "static void" header, braces,
   declarations (freq, i, n), the REG_P test/':'-arm of the REG_DEAD
   lookups, the early "return" for the single_set case, and the body of
   the first n_operands loop (presumably the bound_p[] initialization —
   confirm against upstream) are missing fragments.  */
389 add_insn_allocno_copies (rtx_insn
*insn
)
391 rtx set
, operand
, dup
;
392 bool bound_p
[MAX_RECOG_OPERANDS
];
394 alternative_mask alts
;
396 freq
= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn
));
/* Case 1: INSN is a simple reg-to-reg move whose source dies here —
   record it directly as a copy.  */
399 if ((set
= single_set (insn
)) != NULL_RTX
400 && REG_SUBREG_P (SET_DEST (set
)) && REG_SUBREG_P (SET_SRC (set
))
401 && ! side_effects_p (set
)
402 && find_reg_note (insn
, REG_DEAD
,
403 REG_P (SET_SRC (set
))
405 : SUBREG_REG (SET_SRC (set
))) != NULL_RTX
)
407 process_regs_for_copy (SET_SRC (set
), SET_DEST (set
),
411 /* Fast check of possibility of constraint or shuffle copies. If
412 there are no dead registers, there will be no such copies. */
413 if (! find_reg_note (insn
, REG_DEAD
, NULL_RTX
))
415 alts
= ira_setup_alts (insn
);
416 for (i
= 0; i
< recog_data
.n_operands
; i
++)
/* Case 2: matched-operand (constraint) copies — an input tied to an
   output via a matching constraint, with the input dying here.  */
418 for (i
= 0; i
< recog_data
.n_operands
; i
++)
420 operand
= recog_data
.operand
[i
];
421 if (! REG_SUBREG_P (operand
))
423 if ((n
= ira_get_dup_out_num (i
, alts
)) >= 0)
426 dup
= recog_data
.operand
[n
];
427 if (REG_SUBREG_P (dup
)
428 && find_reg_note (insn
, REG_DEAD
,
431 : SUBREG_REG (operand
)) != NULL_RTX
)
432 process_regs_for_copy (operand
, dup
, true, NULL
,
/* Case 3: shuffle copies — dying inputs paired with unbound outputs.  */
436 for (i
= 0; i
< recog_data
.n_operands
; i
++)
438 operand
= recog_data
.operand
[i
];
439 if (REG_SUBREG_P (operand
)
440 && find_reg_note (insn
, REG_DEAD
,
442 ? operand
: SUBREG_REG (operand
)) != NULL_RTX
)
443 /* If an operand dies, prefer its hard register for the output
444 operands by decreasing the hard register cost or creating
445 the corresponding allocno copies. The cost will not
446 correspond to a real move insn cost, so make the frequency
448 process_reg_shuffles (insn
, operand
, i
, freq
< 8 ? 1 : freq
/ 8,
453 /* Add copies originated from BB given by LOOP_TREE_NODE. */
/* Loop-tree callback: only nodes representing a basic block contribute;
   each real (non-debug) insn is scanned for copies.  NOTE(review): the
   function header, declarations of bb/insn and the early return for
   non-BB nodes are missing fragments.  */
455 add_copies (ira_loop_tree_node_t loop_tree_node
)
460 bb
= loop_tree_node
->bb
;
463 FOR_BB_INSNS (bb
, insn
)
464 if (NONDEBUG_INSN_P (insn
))
465 add_insn_allocno_copies (insn
);
468 /* Propagate copies the corresponding allocnos on upper loop tree
/* For each copy in a sub-region, mirror it on the parent (or cap)
   allocnos so upper-level regions see the same affinity.
   NOTE(review): the assignments of a1/a2 from the copy (presumably
   cp->first / cp->second — confirm against upstream), the "continue;"
   for root-level copies, and braces are missing fragments.  */
471 propagate_copies (void)
474 ira_copy_iterator ci
;
475 ira_allocno_t a1
, a2
, parent_a1
, parent_a2
;
477 FOR_EACH_COPY (cp
, ci
)
/* Copies already at the root region have nowhere to propagate.  */
481 if (ALLOCNO_LOOP_TREE_NODE (a1
) == ira_loop_tree_root
)
483 ira_assert ((ALLOCNO_LOOP_TREE_NODE (a2
) != ira_loop_tree_root
));
484 parent_a1
= ira_parent_or_cap_allocno (a1
);
485 parent_a2
= ira_parent_or_cap_allocno (a2
);
486 ira_assert (parent_a1
!= NULL
&& parent_a2
!= NULL
);
487 if (! allocnos_conflict_for_copy_p (parent_a1
, parent_a2
))
488 ira_add_allocno_copy (parent_a1
, parent_a2
, cp
->freq
,
489 cp
->constraint_p
, cp
->insn
, cp
->loop_tree_node
);
493 /* Array used to collect all conflict allocnos for given allocno. */
/* Allocated in build_conflicts (sizing fragment not visible there —
   presumably ira_objects_num elements; confirm against upstream) and
   freed at the end of build_conflicts.  */
494 static ira_object_t
*collected_conflict_objects
;
496 /* Build conflict vectors or bit conflict vectors (whatever is more
497 profitable) for object OBJ from the conflict table. */
499 build_object_conflicts (ira_object_t obj
)
501 int i
, px
, parent_num
;
502 ira_allocno_t parent_a
, another_parent_a
;
503 ira_object_t parent_obj
;
504 ira_allocno_t a
= OBJECT_ALLOCNO (obj
);
505 IRA_INT_TYPE
*object_conflicts
;
506 minmax_set_iterator asi
;
507 int parent_min
, parent_max ATTRIBUTE_UNUSED
;
509 object_conflicts
= conflicts
[OBJECT_CONFLICT_ID (obj
)];
511 FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts
,
512 OBJECT_MIN (obj
), OBJECT_MAX (obj
), i
, asi
)
514 ira_object_t another_obj
= ira_object_id_map
[i
];
515 ira_allocno_t another_a
= OBJECT_ALLOCNO (obj
);
517 ira_assert (ira_reg_classes_intersect_p
518 [ALLOCNO_CLASS (a
)][ALLOCNO_CLASS (another_a
)]);
519 collected_conflict_objects
[px
++] = another_obj
;
521 if (ira_conflict_vector_profitable_p (obj
, px
))
524 ira_allocate_conflict_vec (obj
, px
);
525 vec
= OBJECT_CONFLICT_VEC (obj
);
526 memcpy (vec
, collected_conflict_objects
, sizeof (ira_object_t
) * px
);
528 OBJECT_NUM_CONFLICTS (obj
) = px
;
532 int conflict_bit_vec_words_num
;
534 OBJECT_CONFLICT_ARRAY (obj
) = object_conflicts
;
535 if (OBJECT_MAX (obj
) < OBJECT_MIN (obj
))
536 conflict_bit_vec_words_num
= 0;
538 conflict_bit_vec_words_num
539 = ((OBJECT_MAX (obj
) - OBJECT_MIN (obj
) + IRA_INT_BITS
)
541 OBJECT_CONFLICT_ARRAY_SIZE (obj
)
542 = conflict_bit_vec_words_num
* sizeof (IRA_INT_TYPE
);
545 parent_a
= ira_parent_or_cap_allocno (a
);
546 if (parent_a
== NULL
)
548 ira_assert (ALLOCNO_CLASS (a
) == ALLOCNO_CLASS (parent_a
));
549 ira_assert (ALLOCNO_NUM_OBJECTS (a
) == ALLOCNO_NUM_OBJECTS (parent_a
));
550 parent_obj
= ALLOCNO_OBJECT (parent_a
, OBJECT_SUBWORD (obj
));
551 parent_num
= OBJECT_CONFLICT_ID (parent_obj
);
552 parent_min
= OBJECT_MIN (parent_obj
);
553 parent_max
= OBJECT_MAX (parent_obj
);
554 FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts
,
555 OBJECT_MIN (obj
), OBJECT_MAX (obj
), i
, asi
)
557 ira_object_t another_obj
= ira_object_id_map
[i
];
558 ira_allocno_t another_a
= OBJECT_ALLOCNO (another_obj
);
559 int another_word
= OBJECT_SUBWORD (another_obj
);
561 ira_assert (ira_reg_classes_intersect_p
562 [ALLOCNO_CLASS (a
)][ALLOCNO_CLASS (another_a
)]);
564 another_parent_a
= ira_parent_or_cap_allocno (another_a
);
565 if (another_parent_a
== NULL
)
567 ira_assert (ALLOCNO_NUM (another_parent_a
) >= 0);
568 ira_assert (ALLOCNO_CLASS (another_a
)
569 == ALLOCNO_CLASS (another_parent_a
));
570 ira_assert (ALLOCNO_NUM_OBJECTS (another_a
)
571 == ALLOCNO_NUM_OBJECTS (another_parent_a
));
572 SET_MINMAX_SET_BIT (conflicts
[parent_num
],
573 OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (another_parent_a
,
575 parent_min
, parent_max
);
579 /* Build conflict vectors or bit conflict vectors (whatever is more
580 profitable) of all allocnos from the conflict table. */
/* NOTE(review): mangled region — the "static void" header, braces, the
   allocation-size tail of the ira_allocate call, and the inner for-loop
   conditions are missing fragments; recover from upstream.  */
582 build_conflicts (void)
585 ira_allocno_t a
, cap
;
587 collected_conflict_objects
588 = (ira_object_t
*) ira_allocate (sizeof (ira_object_t
)
/* Walk all pseudos from highest regno down; each regno may map to a
   chain of allocnos (one per region).  */
590 for (i
= max_reg_num () - 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
591 for (a
= ira_regno_allocno_map
[i
];
593 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
595 int j
, nregs
= ALLOCNO_NUM_OBJECTS (a
);
596 for (j
= 0; j
< nregs
; j
++)
598 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
599 build_object_conflicts (obj
);
/* Cap allocnos mirror the allocno on enclosing regions; process the
   matching subword object of each cap too.  */
600 for (cap
= ALLOCNO_CAP (a
); cap
!= NULL
; cap
= ALLOCNO_CAP (cap
))
602 ira_object_t cap_obj
= ALLOCNO_OBJECT (cap
, j
);
603 gcc_assert (ALLOCNO_NUM_OBJECTS (cap
) == ALLOCNO_NUM_OBJECTS (a
));
604 build_object_conflicts (cap_obj
);
608 ira_free (collected_conflict_objects
);
613 /* Print hard reg set SET with TITLE to FILE. */
/* Prints maximal runs of set bits, singletons as "N" and longer runs as
   "N-M".  NOTE(review): mangled region — the "static void" header,
   declarations (i, start, end), the run-tracking updates inside the
   loop, the "start = -1;" reset and the trailing newline are missing
   fragments.  */
615 print_hard_reg_set (FILE *file
, const char *title
, HARD_REG_SET set
)
620 for (start
= end
= -1, i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
622 bool reg_included
= TEST_HARD_REG_BIT (set
, i
);
/* Flush the current run when it ends or the register file is
   exhausted.  */
630 if (start
>= 0 && (!reg_included
|| i
== FIRST_PSEUDO_REGISTER
- 1))
633 fprintf (file
, " %d", start
);
/* NOTE(review): with END updated to the last included regno, END >=
   START always holds here, so "start == end + 1" can never be true and
   this branch looks dead; presumably "end == start + 1" was intended so
   a two-element run prints as two numbers — confirm against upstream.
   As written, two-element runs fall through to the "%d-%d" form.  */
634 else if (start
== end
+ 1)
635 fprintf (file
, " %d %d", start
, end
);
637 fprintf (file
, " %d-%d", start
, end
);
/* Print the conflicts of allocno A to FILE, either in terms of regnos
   only (REG_P) or with full allocno details.  NOTE(review): mangled
   region — the function header comment, "static void", declarations
   (bb, i, n), braces, several else-branches and "continue"/"putc"
   fragments are missing; recover from upstream.  */
645 print_allocno_conflicts (FILE * file
, bool reg_p
, ira_allocno_t a
)
647 HARD_REG_SET conflicting_hard_regs
;
/* Header line: regno-only form, or allocno id + regno + region.  */
652 fprintf (file
, ";; r%d", ALLOCNO_REGNO (a
));
655 fprintf (file
, ";; a%d(r%d,", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
656 if ((bb
= ALLOCNO_LOOP_TREE_NODE (a
)->bb
) != NULL
)
657 fprintf (file
, "b%d", bb
->index
);
659 fprintf (file
, "l%d", ALLOCNO_LOOP_TREE_NODE (a
)->loop_num
);
663 fputs (" conflicts:", file
);
664 n
= ALLOCNO_NUM_OBJECTS (a
);
665 for (i
= 0; i
< n
; i
++)
667 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
668 ira_object_t conflict_obj
;
669 ira_object_conflict_iterator oci
;
/* An object without a conflict array has nothing recorded to list.  */
671 if (OBJECT_CONFLICT_ARRAY (obj
) == NULL
)
673 fprintf (file
, "\n;; total conflict hard regs:\n");
674 fprintf (file
, ";; conflict hard regs:\n\n");
679 fprintf (file
, "\n;; subobject %d:", i
);
680 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
682 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
684 fprintf (file
, " r%d,", ALLOCNO_REGNO (conflict_a
));
687 fprintf (file
, " a%d(r%d", ALLOCNO_NUM (conflict_a
),
688 ALLOCNO_REGNO (conflict_a
));
689 if (ALLOCNO_NUM_OBJECTS (conflict_a
) > 1)
690 fprintf (file
, ",w%d", OBJECT_SUBWORD (conflict_obj
));
691 if ((bb
= ALLOCNO_LOOP_TREE_NODE (conflict_a
)->bb
) != NULL
)
692 fprintf (file
, ",b%d", bb
->index
);
694 fprintf (file
, ",l%d",
695 ALLOCNO_LOOP_TREE_NODE (conflict_a
)->loop_num
);
/* Also report conflicting hard registers, masked to the allocno's
   class so only relevant registers show.  */
699 conflicting_hard_regs
= (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
)
701 & reg_class_contents
[ALLOCNO_CLASS (a
)]);
702 print_hard_reg_set (file
, "\n;; total conflict hard regs:",
703 conflicting_hard_regs
);
705 conflicting_hard_regs
= (OBJECT_CONFLICT_HARD_REGS (obj
)
707 & reg_class_contents
[ALLOCNO_CLASS (a
)]);
708 print_hard_reg_set (file
, ";; conflict hard regs:",
709 conflicting_hard_regs
);
715 /* Print information about allocno or only regno (if REG_P) conflicts
/* Iterates every allocno and delegates to print_allocno_conflicts.
   NOTE(review): the function header, braces and the declaration of A
   are missing fragments.  */
718 print_conflicts (FILE *file
, bool reg_p
)
721 ira_allocno_iterator ai
;
723 FOR_EACH_ALLOCNO (a
, ai
)
724 print_allocno_conflicts (file
, reg_p
, a
);
728 /* Print information about allocno or only regno (if REG_P) conflicts
/* Debugger convenience entry point: dump all conflicts to stderr.  */
731 ira_debug_conflicts (bool reg_p
)
733 print_conflicts (stderr
, reg_p
);
738 /* Entry function which builds allocno conflicts and allocno copies
739 and accumulate some allocno info on upper level regions. */
/* NOTE(review): heavily mangled region, and the function's closing
   brace is not visible — the definition may extend beyond this chunk.
   Missing fragments include the "void" header, declarations (a, obj,
   base), the optimize/ira_conflicts_p guards around table building, the
   propagate_copies call, and the SET_HARD_REG_BIT register arguments;
   recover from upstream.  */
741 ira_build_conflicts (void)
745 ira_allocno_iterator ai
;
746 HARD_REG_SET temp_hard_reg_set
;
/* Phase 1: conflict table (may be refused if too large).  */
750 ira_conflicts_p
= build_conflict_bit_table ();
754 ira_object_iterator oi
;
/* Phase 2: collect copies from every basic block of the loop tree.  */
757 ira_traverse_loop_tree (true, ira_loop_tree_root
, add_copies
, NULL
);
758 /* We need finished conflict table for the subsequent call. */
759 if (flag_ira_region
== IRA_REGION_ALL
760 || flag_ira_region
== IRA_REGION_MIXED
)
763 /* Now we can free memory for the conflict table (see function
764 build_object_conflicts for details). */
765 FOR_EACH_OBJECT (obj
, oi
)
/* Rows adopted as an object's own conflict array must survive; only
   free rows that were not adopted.  */
767 if (OBJECT_CONFLICT_ARRAY (obj
) != conflicts
[OBJECT_CONFLICT_ID (obj
)])
768 ira_free (conflicts
[OBJECT_CONFLICT_ID (obj
)]);
770 ira_free (conflicts
);
/* Phase 3: add hard-register conflicts implied by calls, user
   variables, and paradoxical subregs.  */
773 base
= base_reg_class (VOIDmode
, ADDR_SPACE_GENERIC
, ADDRESS
, SCRATCH
)
;
774 if (! targetm
.class_likely_spilled_p (base
))
775 CLEAR_HARD_REG_SET (temp_hard_reg_set
);
777 temp_hard_reg_set
= reg_class_contents
[base
] & ~ira_no_alloc_regs
;
778 FOR_EACH_ALLOCNO (a
, ai
)
780 int i
, n
= ALLOCNO_NUM_OBJECTS (a
);
782 for (i
= 0; i
< n
; i
++)
784 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
785 rtx allocno_reg
= regno_reg_rtx
[ALLOCNO_REGNO (a
)];
787 /* For debugging purposes don't put user defined variables in
788 callee-clobbered registers. However, do allow parameters
789 in callee-clobbered registers to improve debugging. This
790 is a bit of a fragile hack. */
792 && REG_USERVAR_P (allocno_reg
)
793 && ! reg_is_parm_p (allocno_reg
))
795 HARD_REG_SET new_conflict_regs
= crtl
->abi
->full_reg_clobbers ();
796 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
) |= new_conflict_regs
;
797 OBJECT_CONFLICT_HARD_REGS (obj
) |= new_conflict_regs
;
/* Allocnos live across calls conflict with call-clobbered regs
   (narrowed when caller saves can rescue them).  */
800 if (ALLOCNO_CALLS_CROSSED_NUM (a
) != 0)
802 HARD_REG_SET new_conflict_regs
= ira_need_caller_save_regs (a
);
803 if (flag_caller_saves
)
804 new_conflict_regs
&= (~savable_regs
| temp_hard_reg_set
);
805 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
) |= new_conflict_regs
;
806 OBJECT_CONFLICT_HARD_REGS (obj
) |= new_conflict_regs
;
809 /* Now we deal with paradoxical subreg cases where certain registers
810 cannot be accessed in the widest mode. */
811 machine_mode outer_mode
= ALLOCNO_WMODE (a
);
812 machine_mode inner_mode
= ALLOCNO_MODE (a
);
813 if (paradoxical_subreg_p (outer_mode
, inner_mode
))
815 enum reg_class aclass
= ALLOCNO_CLASS (a
);
816 for (int j
= ira_class_hard_regs_num
[aclass
] - 1; j
>= 0; --j
)
818 int inner_regno
= ira_class_hard_regs
[aclass
][j
];
819 int outer_regno
= simplify_subreg_regno (inner_regno
,
/* Registers unusable in the widest mode become conflicts.  */
823 || !in_hard_reg_set_p (reg_class_contents
[aclass
],
824 outer_mode
, outer_regno
))
826 SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
828 SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj
),
835 if (optimize
&& ira_conflicts_p
836 && internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
837 print_conflicts (ira_dump_file
, false);