/* Page-extraction residue: this chunk is gcc/ira-costs.c from official-gcc.git,
   blob 6fa917ab1a1aade107adb9586c949a62b5d26d95.  The "[C++ PATCH] refactor
   duplicate_decls" line was an unrelated mailing-list link on the same page.  */
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2018 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
38 /* The flags is set up every time when we calculate pseudo register
39 classes through function ira_set_pseudo_classes. */
40 static bool pseudo_classes_defined_p = false;
42 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
43 static bool allocno_p;
45 /* Number of elements in array `costs'. */
46 static int cost_elements_num;
48 /* The `costs' struct records the cost of using hard registers of each
49 class considered for the calculation and of using memory for each
50 allocno or pseudo. */
51 struct costs
/* Cost of keeping the value in memory instead of a register.  */
53 int mem_cost;
54 /* Costs for register classes start here. We process only some
55 allocno classes. */
/* NOTE: pre-C99 "struct hack" — each costs object is actually allocated
   with struct_costs_size bytes (see the COSTS macro below), so this
   trailing array holds one entry per considered cost class, not one.  */
56 int cost[1];
/* Shorthands for the per-target IRA state in this_target_ira_int:
   scratch cost buffers shared by the whole pass.  */
59 #define max_struct_costs_size \
60 (this_target_ira_int->x_max_struct_costs_size)
61 #define init_cost \
62 (this_target_ira_int->x_init_cost)
63 #define temp_costs \
64 (this_target_ira_int->x_temp_costs)
65 #define op_costs \
66 (this_target_ira_int->x_op_costs)
67 #define this_op_costs \
68 (this_target_ira_int->x_this_op_costs)
70 /* Costs of each class for each allocno or pseudo. */
71 static struct costs *costs;
73 /* Accumulated costs of each class for each allocno. */
74 static struct costs *total_allocno_costs;
76 /* It is the current size of struct costs. */
77 static size_t struct_costs_size;
79 /* Return pointer to structure containing costs of allocno or pseudo
80 with given NUM in array ARR. */
/* The element stride is the runtime struct_costs_size, not
   sizeof (struct costs), hence the manual byte arithmetic.  */
81 #define COSTS(arr, num) \
82 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
84 /* Return index in COSTS when processing reg with REGNO. */
85 #define COST_INDEX(regno) (allocno_p \
86 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
87 : (int) regno)
89 /* Record register class preferences of each allocno or pseudo. Null
90 value means no preferences. It happens on the 1st iteration of the
91 cost calculation. */
92 static enum reg_class *pref;
94 /* Allocated buffers for pref. */
95 static enum reg_class *pref_buffer;
97 /* Record allocno class of each allocno with the same regno. */
98 static enum reg_class *regno_aclass;
100 /* Record cost gains for not allocating a register with an invariant
101 equivalence. */
102 static int *regno_equiv_gains;
104 /* Execution frequency of the current insn. */
105 static int frequency;
109 /* Info about reg classes whose costs are calculated for a pseudo. */
/* Instances are interned in cost_classes_htab (below), so pointer
   equality implies set equality.  */
110 struct cost_classes
112 /* Number of the cost classes in the subsequent array. */
113 int num;
114 /* Container of the cost classes. */
115 enum reg_class classes[N_REG_CLASSES];
116 /* Map reg class -> index of the reg class in the previous array.
117 -1 if it is not a cost class. */
118 int index[N_REG_CLASSES];
119 /* Map hard regno index of first class in array CLASSES containing
120 the hard regno, -1 otherwise. */
121 int hard_regno_index[FIRST_PSEUDO_REGISTER];
124 /* Types of pointers to the structure above. */
125 typedef struct cost_classes *cost_classes_t;
126 typedef const struct cost_classes *const_cost_classes_t;
128 /* Info about cost classes for each pseudo. */
129 static cost_classes_t *regno_cost_classes;
131 /* Helper for cost_classes hashing. */
133 struct cost_classes_hasher : pointer_hash <cost_classes>
/* Hash only the first NUM entries of CLASSES (definition below).  */
135 static inline hashval_t hash (const cost_classes *);
/* Equality on (NUM, leading CLASSES prefix); the derived maps are not
   compared because they are a function of the class list.  */
136 static inline bool equal (const cost_classes *, const cost_classes *);
/* Frees the interned object when the hash table is deleted.  */
137 static inline void remove (cost_classes *);
140 /* Returns hash value for cost classes info HV. */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
/* Only the first HV->num entries of CLASSES are significant, matching
   what equal () compares.  */
144 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
147 /* Compares cost classes info HV1 and HV2. */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
/* NUM must match first so the memcmp compares prefixes of equal length;
   index/hard_regno_index are derived data and need no comparison.  */
151 return (hv1->num == hv2->num
152 && memcmp (hv1->classes, hv2->classes,
153 sizeof (enum reg_class) * hv1->num) == 0);
156 /* Delete cost classes info V from the hash table. */
157 inline void
158 cost_classes_hasher::remove (cost_classes *v)
/* V was allocated by setup_cost_classes via ira_allocate.  */
160 ira_free (v);
163 /* Hash table of unique cost classes. */
164 static hash_table<cost_classes_hasher> *cost_classes_htab;
166 /* Map allocno class -> cost classes for pseudo of given allocno
167 class. */
168 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
170 /* Map mode -> cost classes for pseudo of given mode. */
171 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
173 /* Cost classes that include all classes in ira_important_classes. */
174 static cost_classes all_cost_classes;
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177 the structure. */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
181 for (int i = 0; i < N_REG_CLASSES; i++)
182 classes_ptr->index[i] = -1;
183 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184 classes_ptr->hard_regno_index[i] = -1;
185 for (int i = 0; i < classes_ptr->num; i++)
187 enum reg_class cl = classes_ptr->classes[i];
188 classes_ptr->index[cl] = i;
189 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
191 unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 classes_ptr->hard_regno_index[hard_regno] = i;
198 /* Initialize info about the cost classes for each pseudo. */
199 static void
200 initiate_regno_cost_classes (void)
202 int size = sizeof (cost_classes_t) * max_reg_num ();
204 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
205 memset (regno_cost_classes, 0, size);
206 memset (cost_classes_aclass_cache, 0,
207 sizeof (cost_classes_t) * N_REG_CLASSES);
208 memset (cost_classes_mode_cache, 0,
209 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
210 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
211 all_cost_classes.num = ira_important_classes_num;
212 for (int i = 0; i < ira_important_classes_num; i++)
213 all_cost_classes.classes[i] = ira_important_classes[i];
214 complete_cost_classes (&all_cost_classes);
217 /* Create new cost classes from cost classes FROM and set up members
218 index and hard_regno_index. Return the new classes. The function
219 implements some common code of two functions
220 setup_regno_cost_classes_by_aclass and
221 setup_regno_cost_classes_by_mode. */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
225 cost_classes_t classes_ptr;
227 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228 classes_ptr->num = from->num;
229 for (int i = 0; i < from->num; i++)
230 classes_ptr->classes[i] = from->classes[i];
231 complete_cost_classes (classes_ptr);
232 return classes_ptr;
235 /* Return a version of FULL that only considers registers in REGS that are
236 valid for mode MODE. Both FULL and the returned class are globally
237 allocated. */
238 static cost_classes_t
239 restrict_cost_classes (cost_classes_t full, machine_mode mode,
240 const HARD_REG_SET &regs)
/* NARROW is a static scratch buffer, so this function is not
   reentrant; the result is interned before being returned.  */
242 static struct cost_classes narrow;
/* map[i] is the index in NARROW of FULL's i-th class, or -1 if that
   class was dropped; used below to rewrite the INDEX map.  */
243 int map[N_REG_CLASSES];
244 narrow.num = 0;
245 for (int i = 0; i < full->num; i++)
247 /* Assume that we'll drop the class. */
248 map[i] = -1;
250 /* Ignore classes that are too small for the mode. */
251 enum reg_class cl = full->classes[i];
252 if (!contains_reg_of_mode[cl][mode])
253 continue;
255 /* Calculate the set of registers in CL that belong to REGS and
256 are valid for MODE. */
257 HARD_REG_SET valid_for_cl;
258 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
259 AND_HARD_REG_SET (valid_for_cl, regs);
260 AND_COMPL_HARD_REG_SET (valid_for_cl,
261 ira_prohibited_class_mode_regs[cl][mode]);
262 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
263 if (hard_reg_set_empty_p (valid_for_cl))
264 continue;
266 /* Don't use this class if the set of valid registers is a subset
267 of an existing class. For example, suppose we have two classes
268 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
269 that the mode changes allowed by FR_REGS are not as general as
270 the mode changes allowed by GR_REGS.
272 In this situation, the mode changes for GR_AND_FR_REGS could
273 either be seen as the union or the intersection of the mode
274 changes allowed by the two subclasses. The justification for
275 the union-based definition would be that, if you want a mode
276 change that's only allowed by GR_REGS, you can pick a register
277 from the GR_REGS subclass. The justification for the
278 intersection-based definition would be that every register
279 from the class would allow the mode change.
281 However, if we have a register that needs to be in GR_REGS,
282 using GR_AND_FR_REGS with the intersection-based definition
283 would be too pessimistic, since it would bring in restrictions
284 that only apply to FR_REGS. Conversely, if we have a register
285 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
286 union-based definition would lose the extra restrictions
287 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
288 for cases where GR_REGS and FP_REGS are both valid. */
289 int pos;
290 for (pos = 0; pos < narrow.num; ++pos)
292 enum reg_class cl2 = narrow.classes[pos];
293 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
294 break;
/* Either the index of the subsuming class found above, or the slot
   where CL itself is about to be appended.  */
296 map[i] = pos;
297 if (pos == narrow.num)
299 /* If several classes are equivalent, prefer to use the one
300 that was chosen as the allocno class. */
301 enum reg_class cl2 = ira_allocno_class_translate[cl];
302 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
303 cl = cl2;
304 narrow.classes[narrow.num++] = cl;
/* Nothing was dropped: reuse FULL instead of interning a copy.  */
307 if (narrow.num == full->num)
308 return full;
/* Intern NARROW so equal restrictions share one object.  */
310 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
311 if (*slot == NULL)
313 cost_classes_t classes = setup_cost_classes (&narrow);
314 /* Map equivalent classes to the representative that we chose above. */
315 for (int i = 0; i < ira_important_classes_num; i++)
317 enum reg_class cl = ira_important_classes[i];
318 int index = full->index[cl];
319 if (index >= 0)
320 classes->index[cl] = map[index];
322 *slot = classes;
324 return *slot;
327 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
328 This function is used when we know an initial approximation of
329 allocno class of the pseudo already, e.g. on the second iteration
330 of class cost calculation or after class cost calculation in
331 register-pressure sensitive insn scheduling or register-pressure
332 sensitive loop-invariant motion. */
333 static void
334 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
/* Static scratch buffer for building the candidate list before it is
   interned; makes this function non-reentrant.  */
336 static struct cost_classes classes;
337 cost_classes_t classes_ptr;
338 enum reg_class cl;
339 int i;
340 cost_classes **slot;
341 HARD_REG_SET temp, temp2;
342 bool exclude_p;
/* The per-ACLASS result is memoized; build it only on a cache miss.  */
344 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
346 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
347 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
348 /* We exclude classes from consideration which are subsets of
349 ACLASS only if ACLASS is an uniform class. */
350 exclude_p = ira_uniform_class_p[aclass];
351 classes.num = 0;
352 for (i = 0; i < ira_important_classes_num; i++)
354 cl = ira_important_classes[i];
355 if (exclude_p)
357 /* Exclude non-uniform classes which are subsets of
358 ACLASS. */
359 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
360 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
361 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
362 continue;
364 classes.classes[classes.num++] = cl;
/* Intern the list so equal lists share one cost_classes object.  */
366 slot = cost_classes_htab->find_slot (&classes, INSERT);
367 if (*slot == NULL)
369 classes_ptr = setup_cost_classes (&classes);
370 *slot = classes_ptr;
372 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
374 if (regno_reg_rtx[regno] != NULL_RTX)
376 /* Restrict the classes to those that are valid for REGNO's mode
377 (which might for example exclude singleton classes if the mode
378 requires two registers). Also restrict the classes to those that
379 are valid for subregs of REGNO. */
380 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
381 if (!valid_regs)
382 valid_regs = &reg_class_contents[ALL_REGS];
383 classes_ptr = restrict_cost_classes (classes_ptr,
384 PSEUDO_REGNO_MODE (regno),
385 *valid_regs);
387 regno_cost_classes[regno] = classes_ptr;
390 /* Setup cost classes for pseudo REGNO with MODE. Usage of MODE can
391 decrease number of cost classes for the pseudo, if hard registers
392 of some important classes can not hold a value of MODE. So the
393 pseudo can not get hard register of some important classes and cost
394 calculation for such important classes is only wasting CPU
395 time. */
396 static void
397 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
399 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
400 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
401 mode, *valid_regs);
402 else
404 if (cost_classes_mode_cache[mode] == NULL)
405 cost_classes_mode_cache[mode]
406 = restrict_cost_classes (&all_cost_classes, mode,
407 reg_class_contents[ALL_REGS]);
408 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
412 /* Finalize info about the cost classes for each pseudo. */
413 static void
414 finish_regno_cost_classes (void)
416 ira_free (regno_cost_classes);
417 delete cost_classes_htab;
418 cost_classes_htab = NULL;
423 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
424 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
425 be a pseudo register. */
426 static int
427 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
428 secondary_reload_info *prev_sri)
430 secondary_reload_info sri;
431 reg_class_t secondary_class = NO_REGS;
433 /* If X is a SCRATCH, there is actually nothing to move since we are
434 assuming optimal allocation. */
435 if (GET_CODE (x) == SCRATCH)
436 return 0;
438 /* Get the class we will actually use for a reload. */
439 rclass = targetm.preferred_reload_class (x, rclass);
441 /* If we need a secondary reload for an intermediate, the cost is
442 that to load the input into the intermediate register, then to
443 copy it. */
444 sri.prev_sri = prev_sri;
445 sri.extra_cost = 0;
446 /* PR 68770: Secondary reload might examine the t_icode field. */
447 sri.t_icode = CODE_FOR_nothing;
449 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
451 if (secondary_class != NO_REGS)
453 ira_init_register_move_cost_if_necessary (mode);
/* Recurse with the intermediate class; chaining PREV_SRI through SRI
   lets the target see the whole secondary-reload chain.  */
454 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
455 + sri.extra_cost
456 + copy_cost (x, mode, secondary_class, to_p, &sri));
459 /* For memory, use the memory move cost, for (hard) registers, use
460 the cost to move between the register classes, and use 2 for
461 everything else (constants). */
/* Index [to_p != 0] selects the store (0) vs load (1) direction.  */
462 if (MEM_P (x) || rclass == NO_REGS)
463 return sri.extra_cost
464 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
465 else if (REG_P (x))
467 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
469 ira_init_register_move_cost_if_necessary (mode);
470 return (sri.extra_cost
471 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
473 else
474 /* If this is a constant, we may eventually want to call rtx_cost
475 here. */
476 return sri.extra_cost + COSTS_N_INSNS (1);
481 /* Record the cost of using memory or hard registers of various
482 classes for the operands in INSN.
484 N_ALTS is the number of alternatives.
485 N_OPS is the number of operands.
486 OPS is an array of the operands.
487 MODES are the modes of the operands, in case any are VOIDmode.
488 CONSTRAINTS are the constraints to use for the operands. This array
489 is modified by this procedure.
491 This procedure works alternative by alternative. For each
492 alternative we assume that we will be able to allocate all allocnos
493 to their ideal register class and calculate the cost of using that
494 alternative. Then we compute, for each operand that is a
495 pseudo-register, the cost of having the allocno allocated to each
496 register class and using it in that alternative. To this cost is
497 added the cost of the alternative.
499 The cost of each class for this insn is its lowest cost among all
500 the alternatives. */
501 static void
502 record_reg_classes (int n_alts, int n_ops, rtx *ops,
503 machine_mode *modes, const char **constraints,
504 rtx_insn *insn, enum reg_class *pref)
506 int alt;
507 int i, j, k;
508 int insn_allows_mem[MAX_RECOG_OPERANDS];
509 move_table *move_in_cost, *move_out_cost;
510 short (*mem_cost)[2];
512 for (i = 0; i < n_ops; i++)
513 insn_allows_mem[i] = 0;
515 /* Process each alternative, each time minimizing an operand's cost
516 with the cost for each operand in that alternative. */
517 alternative_mask preferred = get_preferred_alternatives (insn);
518 for (alt = 0; alt < n_alts; alt++)
520 enum reg_class classes[MAX_RECOG_OPERANDS];
521 int allows_mem[MAX_RECOG_OPERANDS];
522 enum reg_class rclass;
523 int alt_fail = 0;
524 int alt_cost = 0, op_cost_add;
/* Alternatives disparaged away by the target are skipped wholesale;
   the constraint pointers must still be advanced past them.  */
526 if (!TEST_BIT (preferred, alt))
528 for (i = 0; i < recog_data.n_operands; i++)
529 constraints[i] = skip_alternative (constraints[i]);
531 continue;
534 for (i = 0; i < n_ops; i++)
536 unsigned char c;
537 const char *p = constraints[i];
538 rtx op = ops[i];
539 machine_mode mode = modes[i];
540 int allows_addr = 0;
541 int win = 0;
543 /* Initially show we know nothing about the register class. */
544 classes[i] = NO_REGS;
545 allows_mem[i] = 0;
547 /* If this operand has no constraints at all, we can
548 conclude nothing about it since anything is valid. */
549 if (*p == 0)
551 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
552 memset (this_op_costs[i], 0, struct_costs_size)
553 continue;
556 /* If this alternative is only relevant when this operand
557 matches a previous operand, we do different things
558 depending on whether this operand is a allocno-reg or not.
559 We must process any modifiers for the operand before we
560 can make this test. */
561 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
562 p++;
564 if (p[0] >= '0' && p[0] <= '0' + i)
566 /* Copy class and whether memory is allowed from the
567 matching alternative. Then perform any needed cost
568 computations and/or adjustments. */
569 j = p[0] - '0';
570 classes[i] = classes[j];
571 allows_mem[i] = allows_mem[j];
572 if (allows_mem[i])
573 insn_allows_mem[i] = 1;
575 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
577 /* If this matches the other operand, we have no
578 added cost and we win. */
579 if (rtx_equal_p (ops[j], op))
580 win = 1;
581 /* If we can put the other operand into a register,
582 add to the cost of this alternative the cost to
583 copy this operand to the register used for the
584 other operand. */
585 else if (classes[j] != NO_REGS)
587 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
588 win = 1;
591 else if (! REG_P (ops[j])
592 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
594 /* This op is an allocno but the one it matches is
595 not. */
597 /* If we can't put the other operand into a
598 register, this alternative can't be used. */
600 if (classes[j] == NO_REGS)
601 alt_fail = 1;
602 /* Otherwise, add to the cost of this alternative
603 the cost to copy the other operand to the hard
604 register used for this operand. */
605 else
606 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
608 else
610 /* The costs of this operand are not the same as the
611 other operand since move costs are not symmetric.
612 Moreover, if we cannot tie them, this alternative
613 needs to do a copy, which is one insn. */
614 struct costs *pp = this_op_costs[i];
615 int *pp_costs = pp->cost;
616 cost_classes_t cost_classes_ptr
617 = regno_cost_classes[REGNO (op)];
618 enum reg_class *cost_classes = cost_classes_ptr->classes;
619 bool in_p = recog_data.operand_type[i] != OP_OUT;
620 bool out_p = recog_data.operand_type[i] != OP_IN;
621 enum reg_class op_class = classes[i];
623 ira_init_register_move_cost_if_necessary (mode);
/* Fill this_op_costs[i] for every candidate class.  The three
   branches below handle output-only, input-only, and in/out
   operands; within each, NO_REGS means the value would live in
   memory, so memory move costs apply instead of register moves.  */
624 if (! in_p)
626 ira_assert (out_p);
627 if (op_class == NO_REGS)
629 mem_cost = ira_memory_move_cost[mode];
630 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
632 rclass = cost_classes[k];
633 pp_costs[k] = mem_cost[rclass][0] * frequency;
636 else
638 move_out_cost = ira_may_move_out_cost[mode];
639 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
641 rclass = cost_classes[k];
642 pp_costs[k]
643 = move_out_cost[op_class][rclass] * frequency;
647 else if (! out_p)
649 ira_assert (in_p);
650 if (op_class == NO_REGS)
652 mem_cost = ira_memory_move_cost[mode];
653 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
655 rclass = cost_classes[k];
656 pp_costs[k] = mem_cost[rclass][1] * frequency;
659 else
661 move_in_cost = ira_may_move_in_cost[mode];
662 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
664 rclass = cost_classes[k];
665 pp_costs[k]
666 = move_in_cost[rclass][op_class] * frequency;
670 else
672 if (op_class == NO_REGS)
674 mem_cost = ira_memory_move_cost[mode];
675 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
677 rclass = cost_classes[k];
678 pp_costs[k] = ((mem_cost[rclass][0]
679 + mem_cost[rclass][1])
680 * frequency);
683 else
685 move_in_cost = ira_may_move_in_cost[mode];
686 move_out_cost = ira_may_move_out_cost[mode];
687 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
689 rclass = cost_classes[k];
690 pp_costs[k] = ((move_in_cost[rclass][op_class]
691 + move_out_cost[op_class][rclass])
692 * frequency);
697 /* If the alternative actually allows memory, make
698 things a bit cheaper since we won't need an extra
699 insn to load it. */
700 pp->mem_cost
701 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
702 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
703 - allows_mem[i]) * frequency;
705 /* If we have assigned a class to this allocno in
706 our first pass, add a cost to this alternative
707 corresponding to what we would add if this
708 allocno were not in the appropriate class. */
709 if (pref)
711 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
713 if (pref_class == NO_REGS)
714 alt_cost
715 += ((out_p
716 ? ira_memory_move_cost[mode][op_class][0] : 0)
717 + (in_p
718 ? ira_memory_move_cost[mode][op_class][1]
719 : 0));
720 else if (ira_reg_class_intersect
721 [pref_class][op_class] == NO_REGS)
722 alt_cost
723 += ira_register_move_cost[mode][pref_class][op_class];
725 if (REGNO (ops[i]) != REGNO (ops[j])
726 && ! find_reg_note (insn, REG_DEAD, op))
727 alt_cost += 2;
/* Advance past the matching-constraint digit.  NOTE(review): a
   single p++ assumes single-digit operand numbers here — matching
   operands 10+ would need CONSTRAINT_LEN; confirm against upstream.  */
729 p++;
733 /* Scan all the constraint letters. See if the operand
734 matches any of the constraints. Collect the valid
735 register classes and see if this operand accepts
736 memory. */
737 while ((c = *p))
739 switch (c)
741 case '*':
742 /* Ignore the next letter for this pass. */
743 c = *++p;
744 break;
746 case '^':
747 alt_cost += 2;
748 break;
750 case '?':
751 alt_cost += 2;
752 break;
754 case 'g':
755 if (MEM_P (op)
756 || (CONSTANT_P (op)
757 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
758 win = 1;
759 insn_allows_mem[i] = allows_mem[i] = 1;
760 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
761 break;
763 default:
764 enum constraint_num cn = lookup_constraint (p);
765 enum reg_class cl;
766 switch (get_constraint_type (cn))
768 case CT_REGISTER:
769 cl = reg_class_for_constraint (cn);
770 if (cl != NO_REGS)
771 classes[i] = ira_reg_class_subunion[classes[i]][cl];
772 break;
774 case CT_CONST_INT:
775 if (CONST_INT_P (op)
776 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
777 win = 1;
778 break;
780 case CT_MEMORY:
781 /* Every MEM can be reloaded to fit. */
782 insn_allows_mem[i] = allows_mem[i] = 1;
783 if (MEM_P (op))
784 win = 1;
785 break;
787 case CT_SPECIAL_MEMORY:
788 insn_allows_mem[i] = allows_mem[i] = 1;
789 if (MEM_P (op) && constraint_satisfied_p (op, cn))
790 win = 1;
791 break;
793 case CT_ADDRESS:
794 /* Every address can be reloaded to fit. */
795 allows_addr = 1;
796 if (address_operand (op, GET_MODE (op))
797 || constraint_satisfied_p (op, cn))
798 win = 1;
799 /* We know this operand is an address, so we
800 want it to be allocated to a hard register
801 that can be the base of an address,
802 i.e. BASE_REG_CLASS. */
803 classes[i]
804 = ira_reg_class_subunion[classes[i]]
805 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
806 ADDRESS, SCRATCH)];
807 break;
809 case CT_FIXED_FORM:
810 if (constraint_satisfied_p (op, cn))
811 win = 1;
812 break;
814 break;
816 p += CONSTRAINT_LEN (c, p);
817 if (c == ',')
818 break;
821 constraints[i] = p;
823 if (alt_fail)
824 break;
826 /* How we account for this operand now depends on whether it
827 is a pseudo register or not. If it is, we first check if
828 any register classes are valid. If not, we ignore this
829 alternative, since we want to assume that all allocnos get
830 allocated for register preferencing. If some register
831 class is valid, compute the costs of moving the allocno
832 into that class. */
833 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
835 if (classes[i] == NO_REGS && ! allows_mem[i])
837 /* We must always fail if the operand is a REG, but
838 we did not find a suitable class and memory is
839 not allowed.
841 Otherwise we may perform an uninitialized read
842 from this_op_costs after the `continue' statement
843 below. */
844 alt_fail = 1;
846 else
848 unsigned int regno = REGNO (op);
849 struct costs *pp = this_op_costs[i];
850 int *pp_costs = pp->cost;
851 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
852 enum reg_class *cost_classes = cost_classes_ptr->classes;
853 bool in_p = recog_data.operand_type[i] != OP_OUT;
854 bool out_p = recog_data.operand_type[i] != OP_IN;
855 enum reg_class op_class = classes[i];
857 ira_init_register_move_cost_if_necessary (mode);
/* Same per-class cost fill as in the matching-constraint case
   above: output-only / input-only / in-out, with NO_REGS meaning
   the value stays in memory.  */
858 if (! in_p)
860 ira_assert (out_p);
861 if (op_class == NO_REGS)
863 mem_cost = ira_memory_move_cost[mode];
864 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
866 rclass = cost_classes[k];
867 pp_costs[k] = mem_cost[rclass][0] * frequency;
870 else
872 move_out_cost = ira_may_move_out_cost[mode];
873 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
875 rclass = cost_classes[k];
876 pp_costs[k]
877 = move_out_cost[op_class][rclass] * frequency;
881 else if (! out_p)
883 ira_assert (in_p);
884 if (op_class == NO_REGS)
886 mem_cost = ira_memory_move_cost[mode];
887 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
889 rclass = cost_classes[k];
890 pp_costs[k] = mem_cost[rclass][1] * frequency;
893 else
895 move_in_cost = ira_may_move_in_cost[mode];
896 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
898 rclass = cost_classes[k];
899 pp_costs[k]
900 = move_in_cost[rclass][op_class] * frequency;
904 else
906 if (op_class == NO_REGS)
908 mem_cost = ira_memory_move_cost[mode];
909 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
911 rclass = cost_classes[k];
912 pp_costs[k] = ((mem_cost[rclass][0]
913 + mem_cost[rclass][1])
914 * frequency);
917 else
919 move_in_cost = ira_may_move_in_cost[mode];
920 move_out_cost = ira_may_move_out_cost[mode];
921 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
923 rclass = cost_classes[k];
924 pp_costs[k] = ((move_in_cost[rclass][op_class]
925 + move_out_cost[op_class][rclass])
926 * frequency);
931 if (op_class == NO_REGS)
932 /* Although we don't need insn to reload from
933 memory, still accessing memory is usually more
934 expensive than a register. */
935 pp->mem_cost = frequency;
936 else
937 /* If the alternative actually allows memory, make
938 things a bit cheaper since we won't need an
939 extra insn to load it. */
940 pp->mem_cost
941 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
942 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
943 - allows_mem[i]) * frequency;
944 /* If we have assigned a class to this allocno in
945 our first pass, add a cost to this alternative
946 corresponding to what we would add if this
947 allocno were not in the appropriate class. */
948 if (pref)
950 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
952 if (pref_class == NO_REGS)
954 if (op_class != NO_REGS)
955 alt_cost
956 += ((out_p
957 ? ira_memory_move_cost[mode][op_class][0]
958 : 0)
959 + (in_p
960 ? ira_memory_move_cost[mode][op_class][1]
961 : 0));
963 else if (op_class == NO_REGS)
964 alt_cost
965 += ((out_p
966 ? ira_memory_move_cost[mode][pref_class][1]
967 : 0)
968 + (in_p
969 ? ira_memory_move_cost[mode][pref_class][0]
970 : 0));
971 else if (ira_reg_class_intersect[pref_class][op_class]
972 == NO_REGS)
973 alt_cost += (ira_register_move_cost
974 [mode][pref_class][op_class]);
979 /* Otherwise, if this alternative wins, either because we
980 have already determined that or if we have a hard
981 register of the proper class, there is no cost for this
982 alternative. */
983 else if (win || (REG_P (op)
984 && reg_fits_class_p (op, classes[i],
985 0, GET_MODE (op))))
988 /* If registers are valid, the cost of this alternative
989 includes copying the object to and/or from a
990 register. */
991 else if (classes[i] != NO_REGS)
993 if (recog_data.operand_type[i] != OP_OUT)
994 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
996 if (recog_data.operand_type[i] != OP_IN)
997 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
999 /* The only other way this alternative can be used is if
1000 this is a constant that could be placed into memory. */
1001 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1002 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1003 else
1004 alt_fail = 1;
1006 if (alt_fail)
1007 break;
1010 if (alt_fail)
1012 /* The loop above might have exited early once the failure
1013 was seen. Skip over the constraints for the remaining
1014 operands. */
1015 i += 1;
1016 for (; i < n_ops; ++i)
1017 constraints[i] = skip_alternative (constraints[i]);
1018 continue;
1021 op_cost_add = alt_cost * frequency;
1022 /* Finally, update the costs with the information we've
1023 calculated about this alternative. */
/* Each operand's accumulated cost is the minimum over all
   alternatives of (per-alternative operand cost + alternative
   cost), doubled for in/out operands.  */
1024 for (i = 0; i < n_ops; i++)
1025 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1027 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1028 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1029 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1030 cost_classes_t cost_classes_ptr
1031 = regno_cost_classes[REGNO (ops[i])];
1033 pp->mem_cost = MIN (pp->mem_cost,
1034 (qq->mem_cost + op_cost_add) * scale);
1036 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1037 pp_costs[k]
1038 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
/* An allocno used by an insn with no memory-allowing alternative is
   a bad candidate for spilling.  */
1042 if (allocno_p)
1043 for (i = 0; i < n_ops; i++)
1045 ira_allocno_t a;
1046 rtx op = ops[i];
1048 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1049 continue;
1050 a = ira_curr_regno_allocno_map [REGNO (op)];
1051 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1052 ALLOCNO_BAD_SPILL_P (a) = true;
1059 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1060 static inline bool
1061 ok_for_index_p_nonstrict (rtx reg)
1063 unsigned regno = REGNO (reg);
1065 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1068 /* A version of regno_ok_for_base_p for use here, when all
1069 pseudo-registers should count as OK. Arguments as for
1070 regno_ok_for_base_p. */
1071 static inline bool
1072 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1073 enum rtx_code outer_code, enum rtx_code index_code)
1075 unsigned regno = REGNO (reg);
1077 if (regno >= FIRST_PSEUDO_REGISTER)
1078 return true;
1079 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
/* Record the pseudo registers we must reload into hard registers in a
   subexpression of a memory address, X.

   If CONTEXT is 0, we are looking at the base part of an address,
   otherwise we are looking at the index part.

   MODE and AS are the mode and address space of the memory reference;
   OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
   These four arguments are passed down to base_reg_class.

   SCALE is twice the amount to multiply the cost by (it is twice so
   we can represent half-cost adjustments).  */
static void
record_address_regs (machine_mode mode, addr_space_t as, rtx x,
		     int context, enum rtx_code outer_code,
		     enum rtx_code index_code, int scale)
{
  enum rtx_code code = GET_CODE (x);
  enum reg_class rclass;

  /* RCLASS is the class a register appearing at this point would have
     to belong to: the index class in index context, otherwise the
     target's base class for this address shape.  */
  if (context == 1)
    rclass = INDEX_REG_CLASS;
  else
    rclass = base_reg_class (mode, as, outer_code, index_code);

  switch (code)
    {
    /* Constants contain no registers; nothing to record.  */
    case CONST_INT:
    case CONST:
    case CC0:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case PLUS:
      /* When we have an address that is a sum, we must determine
	 whether registers are "base" or "index" regs.  If there is a
	 sum of two registers, we must choose one to be the "base".
	 Luckily, we can use the REG_POINTER to make a good choice
	 most of the time.  We only need to do this on machines that
	 can have two registers in an address and where the base and
	 index register classes are different.

	 ??? This code used to set REGNO_POINTER_FLAG in some cases,
	 but that seems bogus since it should only be set when we are
	 sure the register is being used as a pointer.  */
      {
	rtx arg0 = XEXP (x, 0);
	rtx arg1 = XEXP (x, 1);
	enum rtx_code code0 = GET_CODE (arg0);
	enum rtx_code code1 = GET_CODE (arg1);

	/* Look inside subregs.  */
	if (code0 == SUBREG)
	  arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
	if (code1 == SUBREG)
	  arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);

	/* If index registers do not appear, or coincide with base registers,
	   just record registers in any non-constant operands.  We
	   assume here, as well as in the tests below, that all
	   addresses are in canonical form.  */
	if (MAX_REGS_PER_ADDRESS == 1
	    || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
	  {
	    record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	    if (! CONSTANT_P (arg1))
	      record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
	  }

	/* If the second operand is a constant integer, it doesn't
	   change what class the first operand must be.  */
	else if (CONST_SCALAR_INT_P (arg1))
	  record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	/* If the second operand is a symbolic constant, the first
	   operand must be an index register.  */
	else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
	  record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	/* If both operands are registers but one is already a hard
	   register of index or reg-base class, give the other the
	   class that the hard register is not.  */
	else if (code0 == REG && code1 == REG
		 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg0)))
	  record_address_regs (mode, as, arg1,
			       ok_for_base_p_nonstrict (arg0, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	else if (code0 == REG && code1 == REG
		 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg1)))
	  record_address_regs (mode, as, arg0,
			       ok_for_base_p_nonstrict (arg1, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	/* If one operand is known to be a pointer, it must be the
	   base with the other operand the index.  Likewise if the
	   other operand is a MULT.  */
	else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
	  }
	else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
	  }
	/* Otherwise, count equal chances that each might be a base or
	   index register.  This case should be rare.  Each operand is
	   recorded in both contexts at half SCALE so the totals stay
	   balanced.  */
	else
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
	  }
      }
      break;

      /* Double the importance of an allocno that is incremented or
	 decremented, since it would take two extra insns if it ends
	 up in the wrong place.  */
    case POST_MODIFY:
    case PRE_MODIFY:
      record_address_regs (mode, as, XEXP (x, 0), 0, code,
			   GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
      if (REG_P (XEXP (XEXP (x, 1), 1)))
	record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
			     2 * scale);
      break;

    case POST_INC:
    case PRE_INC:
    case POST_DEC:
    case PRE_DEC:
      /* Double the importance of an allocno that is incremented or
	 decremented, since it would take two extra insns if it ends
	 up in the wrong place.  */
      record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
      break;

    case REG:
      /* A pseudo appearing in an address: charge it the cost of being
	 reloaded into RCLASS, and discourage spilling it.  All sums
	 below saturate at INT_MAX instead of overflowing.  */
      {
	struct costs *pp;
	int *pp_costs;
	enum reg_class i;
	int k, regno, add_cost;
	cost_classes_t cost_classes_ptr;
	enum reg_class *cost_classes;
	move_table *move_in_cost;

	/* Hard registers need no recorded costs here.  */
	if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	  break;

	regno = REGNO (x);
	if (allocno_p)
	  ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
	pp = COSTS (costs, COST_INDEX (regno));
	/* SCALE is twice the real factor, hence the division by 2.  */
	add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
	if (INT_MAX - add_cost < pp->mem_cost)
	  pp->mem_cost = INT_MAX;
	else
	  pp->mem_cost += add_cost;
	cost_classes_ptr = regno_cost_classes[regno];
	cost_classes = cost_classes_ptr->classes;
	pp_costs = pp->cost;
	ira_init_register_move_cost_if_necessary (Pmode);
	move_in_cost = ira_may_move_in_cost[Pmode];
	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	  {
	    i = cost_classes[k];
	    add_cost = (move_in_cost[i][rclass] * scale) / 2;
	    if (INT_MAX - add_cost < pp_costs[k])
	      pp_costs[k] = INT_MAX;
	    else
	      pp_costs[k] += add_cost;
	  }
      }
      break;

    default:
      /* Any other rtx: recurse into each rtx sub-expression.  */
      {
	const char *fmt = GET_RTX_FORMAT (code);
	int i;
	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
				 scale);
      }
    }
}
/* Calculate the costs of insn operands.  INSN is the insn whose
   recog_data has already been extracted; PREF is the current array of
   tentative preferred classes (or NULL), passed through to
   record_reg_classes.  */
static void
record_operand_costs (rtx_insn *insn, enum reg_class *pref)
{
  const char *constraints[MAX_RECOG_OPERANDS];
  machine_mode modes[MAX_RECOG_OPERANDS];
  rtx ops[MAX_RECOG_OPERANDS];
  rtx set;
  int i;

  /* Snapshot the constraints and modes before we strip SUBREGs from
     recog_data.operand below.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      constraints[i] = recog_data.constraints[i];
      modes[i] = recog_data.operand_mode[i];
    }

  /* If we get here, we are set up to record the costs of all the
     operands for this insn.  Start by initializing the costs.  Then
     handle any address registers.  Finally record the desired classes
     for any allocnos, doing it twice if some pair of operands are
     commutative.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      memcpy (op_costs[i], init_cost, struct_costs_size);

      ops[i] = recog_data.operand[i];
      if (GET_CODE (recog_data.operand[i]) == SUBREG)
	recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);

      if (MEM_P (recog_data.operand[i]))
	record_address_regs (GET_MODE (recog_data.operand[i]),
			     MEM_ADDR_SPACE (recog_data.operand[i]),
			     XEXP (recog_data.operand[i], 0),
			     0, MEM, SCRATCH, frequency * 2);
      else if (constraints[i][0] == 'p'
	       || (insn_extra_address_constraint
		   (lookup_constraint (constraints[i]))))
	record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
			     recog_data.operand[i], 0, ADDRESS, SCRATCH,
			     frequency * 2);
    }

  /* Check for commutative in a separate loop so everything will have
     been initialized.  We must do this even if one operand is a
     constant--see addsi3 in m68k.md.  */
  for (i = 0; i < (int) recog_data.n_operands - 1; i++)
    if (constraints[i][0] == '%')
      {
	const char *xconstraints[MAX_RECOG_OPERANDS];
	int j;

	/* Handle commutative operands by swapping the constraints.
	   We assume the modes are the same.  */
	for (j = 0; j < recog_data.n_operands; j++)
	  xconstraints[j] = constraints[j];

	xconstraints[i] = constraints[i+1];
	xconstraints[i+1] = constraints[i];
	record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
			    recog_data.operand, modes,
			    xconstraints, insn, pref);
      }
  record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
		      recog_data.operand, modes,
		      constraints, insn, pref);

  /* If this insn is a single set copying operand 1 to operand 0 and
     one operand is an allocno with the other a hard reg or an allocno
     that prefers a hard register that is in its own register class
     then we may want to adjust the cost of that register class to -1.

     Avoid the adjustment if the source does not die to avoid
     stressing of register allocator by preferencing two colliding
     registers into single class.

     Also avoid the adjustment if a copy between hard registers of the
     class is expensive (ten times the cost of a default copy is
     considered arbitrarily expensive).  This avoids losing when the
     preferred class is very expensive as the source of a copy
     instruction.  */
  if ((set = single_set (insn)) != NULL_RTX
      /* In rare cases the single set insn might have fewer than 2
	 operands as the source can be a fixed special reg.  */
      && recog_data.n_operands > 1
      && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
    {
      int regno, other_regno;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);

      /* Look through paradoxical-free SUBREGs (same size on both
	 sides) to find the underlying registers.  */
      if (GET_CODE (dest) == SUBREG
	  && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
	dest = SUBREG_REG (dest);
      if (GET_CODE (src) == SUBREG
	  && known_eq (GET_MODE_SIZE (GET_MODE (src)),
		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
	src = SUBREG_REG (src);
      if (REG_P (src) && REG_P (dest)
	  && find_regno_note (insn, REG_DEAD, REGNO (src))
	  && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
	       && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
	      || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
		  && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
	{
	  machine_mode mode = GET_MODE (src);
	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	  enum reg_class *cost_classes = cost_classes_ptr->classes;
	  reg_class_t rclass;
	  int k;

	  /* I indexes the pseudo operand: 1 if the pseudo is the
	     source, 0 if it is the destination.  */
	  i = regno == (int) REGNO (src) ? 1 : 0;
	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	    {
	      rclass = cost_classes[k];
	      if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
		  && (reg_class_size[(int) rclass]
		      == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
		{
		  /* A negative cost makes the class actively
		     preferred for this pseudo.  */
		  if (reg_class_size[rclass] == 1)
		    op_costs[i]->cost[k] = -frequency;
		  else if (in_hard_reg_set_p (reg_class_contents[rclass],
					      mode, other_regno))
		    op_costs[i]->cost[k] = -frequency;
		}
	    }
	}
    }
}
/* Process one insn INSN.  Scan it and record each time it would save
   code to put a certain allocnos in a certain class.  Return the last
   insn processed, so that the scan can be continued from there.  */
static rtx_insn *
scan_one_insn (rtx_insn *insn)
{
  enum rtx_code pat_code;
  rtx set, note;
  int i, k;
  bool counted_mem;

  if (!NONDEBUG_INSN_P (insn))
    return insn;

  /* Whole-insn asm carries no operand constraints to cost.  */
  pat_code = GET_CODE (PATTERN (insn));
  if (pat_code == ASM_INPUT)
    return insn;

  /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
     and initialize the register move costs of mode M.

     The pseudo may be related to another pseudo via a copy (implicit or
     explicit) and if there are no mode M uses/sets of the original
     pseudo, then we may leave the register move costs uninitialized for
     mode M.  */
  if (pat_code == USE || pat_code == CLOBBER)
    {
      rtx x = XEXP (PATTERN (insn), 0);
      if (GET_CODE (x) == REG
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && have_regs_of_mode[GET_MODE (x)])
	ira_init_register_move_cost_if_necessary (GET_MODE (x));
      return insn;
    }

  /* CLOBBER_HIGH is only expected on hard registers; nothing to cost.  */
  if (pat_code == CLOBBER_HIGH)
    {
      gcc_assert (REG_P (XEXP (PATTERN (insn), 0))
		  && HARD_REGISTER_P (XEXP (PATTERN (insn), 0)));
      return insn;
    }

  counted_mem = false;
  set = single_set (insn);
  extract_insn (insn);

  /* If this insn loads a parameter from its stack slot, then it
     represents a savings, rather than a cost, if the parameter is
     stored in memory.  Record this fact.

     Similarly if we're loading other constants from memory (constant
     pool, TOC references, small data areas, etc) and this is the only
     assignment to the destination pseudo.

     Don't do this if SET_SRC (set) isn't a general operand, if it is
     a memory requiring special instructions to load it, decreasing
     mem_cost might result in it being loaded using the specialized
     instruction into a register, then stored into stack and loaded
     again from the stack.  See PR52208.

     Don't do this if SET_SRC (set) has side effect.  See PR56124.  */
  if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
      && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
      && ((MEM_P (XEXP (note, 0))
	   && !side_effects_p (SET_SRC (set)))
	  || (CONSTANT_P (XEXP (note, 0))
	      && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
						XEXP (note, 0))
	      && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
      && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
      /* LRA does not use equiv with a symbol for PIC code.  */
      && (! ira_use_lra_p || ! pic_offset_table_rtx
	  || ! contains_symbol_ref_p (XEXP (note, 0))))
    {
      enum reg_class cl = GENERAL_REGS;
      rtx reg = SET_DEST (set);
      int num = COST_INDEX (REGNO (reg));

      /* Subtract (credit) the memory move cost for keeping REG in
	 memory, scaled by the block frequency.  */
      COSTS (costs, num)->mem_cost
	-= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
      record_address_regs (GET_MODE (SET_SRC (set)),
			   MEM_ADDR_SPACE (SET_SRC (set)),
			   XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
			   frequency * 2);
      counted_mem = true;
    }

  record_operand_costs (insn, pref);

  /* Now add the cost for each operand to the total costs for its
     allocno.  All additions saturate at INT_MAX.  */
  for (i = 0; i < recog_data.n_operands; i++)
    if (REG_P (recog_data.operand[i])
	&& REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
      {
	int regno = REGNO (recog_data.operand[i]);
	struct costs *p = COSTS (costs, COST_INDEX (regno));
	struct costs *q = op_costs[i];
	int *p_costs = p->cost, *q_costs = q->cost;
	cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	int add_cost;

	/* If we already accounted for the memory "cost" above, don't
	   do so again.  */
	if (!counted_mem)
	  {
	    add_cost = q->mem_cost;
	    if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
	      p->mem_cost = INT_MAX;
	    else
	      p->mem_cost += add_cost;
	  }
	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	  {
	    add_cost = q_costs[k];
	    if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
	      p_costs[k] = INT_MAX;
	    else
	      p_costs[k] += add_cost;
	  }
      }

  return insn;
}
1539 /* Print allocnos costs to file F. */
1540 static void
1541 print_allocno_costs (FILE *f)
1543 int k;
1544 ira_allocno_t a;
1545 ira_allocno_iterator ai;
1547 ira_assert (allocno_p);
1548 fprintf (f, "\n");
1549 FOR_EACH_ALLOCNO (a, ai)
1551 int i, rclass;
1552 basic_block bb;
1553 int regno = ALLOCNO_REGNO (a);
1554 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1555 enum reg_class *cost_classes = cost_classes_ptr->classes;
1557 i = ALLOCNO_NUM (a);
1558 fprintf (f, " a%d(r%d,", i, regno);
1559 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1560 fprintf (f, "b%d", bb->index);
1561 else
1562 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1563 fprintf (f, ") costs:");
1564 for (k = 0; k < cost_classes_ptr->num; k++)
1566 rclass = cost_classes[k];
1567 fprintf (f, " %s:%d", reg_class_names[rclass],
1568 COSTS (costs, i)->cost[k]);
1569 if (flag_ira_region == IRA_REGION_ALL
1570 || flag_ira_region == IRA_REGION_MIXED)
1571 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1573 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1574 if (flag_ira_region == IRA_REGION_ALL
1575 || flag_ira_region == IRA_REGION_MIXED)
1576 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1577 fprintf (f, "\n");
1581 /* Print pseudo costs to file F. */
1582 static void
1583 print_pseudo_costs (FILE *f)
1585 int regno, k;
1586 int rclass;
1587 cost_classes_t cost_classes_ptr;
1588 enum reg_class *cost_classes;
1590 ira_assert (! allocno_p);
1591 fprintf (f, "\n");
1592 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1594 if (REG_N_REFS (regno) <= 0)
1595 continue;
1596 cost_classes_ptr = regno_cost_classes[regno];
1597 cost_classes = cost_classes_ptr->classes;
1598 fprintf (f, " r%d costs:", regno);
1599 for (k = 0; k < cost_classes_ptr->num; k++)
1601 rclass = cost_classes[k];
1602 fprintf (f, " %s:%d", reg_class_names[rclass],
1603 COSTS (costs, regno)->cost[k]);
1605 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1609 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1610 costs. */
1611 static void
1612 process_bb_for_costs (basic_block bb)
1614 rtx_insn *insn;
1616 frequency = REG_FREQ_FROM_BB (bb);
1617 if (frequency == 0)
1618 frequency = 1;
1619 FOR_BB_INSNS (bb, insn)
1620 insn = scan_one_insn (insn);
1623 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1624 costs. */
1625 static void
1626 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1628 basic_block bb;
1630 bb = loop_tree_node->bb;
1631 if (bb != NULL)
1632 process_bb_for_costs (bb);
/* Find costs of register classes and memory for allocnos or pseudos
   and their best costs.  Set up preferred, alternative and allocno
   classes for pseudos.  DUMP_FILE receives diagnostics when the
   verbosity flags allow it.  */
static void
find_costs_and_classes (FILE *dump_file)
{
  int i, k, start, max_cost_classes_num;
  int pass;
  basic_block bb;
  enum reg_class *regno_best_class, new_class;

  init_recog ();
  regno_best_class
    = (enum reg_class *) ira_allocate (max_reg_num ()
				       * sizeof (enum reg_class));
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    regno_best_class[i] = NO_REGS;
  /* START is 1 when previously computed pseudo classes can seed the
     cost classes (so the pass-0 scan is skipped), 0 otherwise.  */
  if (!resize_reg_info () && allocno_p
      && pseudo_classes_defined_p && flag_expensive_optimizations)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      pref = pref_buffer;
      max_cost_classes_num = 1;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
	  setup_regno_cost_classes_by_aclass
	    (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
	  max_cost_classes_num
	    = MAX (max_cost_classes_num,
		   regno_cost_classes[ALLOCNO_REGNO (a)]->num);
	}
      start = 1;
    }
  else
    {
      pref = NULL;
      max_cost_classes_num = ira_important_classes_num;
      for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
	if (regno_reg_rtx[i] != NULL_RTX)
	  setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
	else
	  setup_regno_cost_classes_by_aclass (i, ALL_REGS);
      start = 0;
    }
  if (allocno_p)
    /* Clear the flag for the next compiled function.  */
    pseudo_classes_defined_p = false;
  /* Normally we scan the insns once and determine the best class to
     use for each allocno.  However, if -fexpensive-optimizations are
     on, we do so twice, the second time using the tentative best
     classes to guide the selection.  */
  for (pass = start; pass <= flag_expensive_optimizations; pass++)
    {
      if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
	fprintf (dump_file,
		 "\nPass %i for finding pseudo/allocno costs\n\n", pass);

      /* On a second pass, narrow the cost classes to those implied by
	 the best classes tentatively chosen on the previous pass.  */
      if (pass != start)
	{
	  max_cost_classes_num = 1;
	  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
	    {
	      setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
	      max_cost_classes_num
		= MAX (max_cost_classes_num, regno_cost_classes[i]->num);
	    }
	}

      struct_costs_size
	= sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
      /* Zero out our accumulation of the cost of each class for each
	 allocno.  */
      memset (costs, 0, cost_elements_num * struct_costs_size);

      if (allocno_p)
	{
	  /* Scan the instructions and record each time it would save code
	     to put a certain allocno in a certain class.  */
	  ira_traverse_loop_tree (true, ira_loop_tree_root,
				  process_bb_node_for_costs, NULL);

	  memcpy (total_allocno_costs, costs,
		  max_struct_costs_size * ira_allocnos_num);
	}
      else
	{
	  basic_block bb;

	  FOR_EACH_BB_FN (bb, cfun)
	    process_bb_for_costs (bb);
	}

      if (pass == 0)
	pref = pref_buffer;

      /* Now for each allocno look at how desirable each class is and
	 find which class is preferred.  */
      for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
	{
	  ira_allocno_t a, parent_a;
	  int rclass, a_num, parent_a_num, add_cost;
	  ira_loop_tree_node_t parent;
	  int best_cost, allocno_cost;
	  enum reg_class best, alt_class;
	  cost_classes_t cost_classes_ptr = regno_cost_classes[i];
	  enum reg_class *cost_classes;
	  int *i_costs = temp_costs->cost;
	  int i_mem_cost;
	  int equiv_savings = regno_equiv_gains[i];

	  if (! allocno_p)
	    {
	      if (regno_reg_rtx[i] == NULL_RTX)
		continue;
	      memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
	      i_mem_cost = temp_costs->mem_cost;
	      cost_classes = cost_classes_ptr->classes;
	    }
	  else
	    {
	      if (ira_regno_allocno_map[i] == NULL)
		continue;
	      memset (temp_costs, 0, struct_costs_size);
	      i_mem_cost = 0;
	      cost_classes = cost_classes_ptr->classes;
	      /* Find cost of all allocnos with the same regno.  */
	      for (a = ira_regno_allocno_map[i];
		   a != NULL;
		   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
		{
		  int *a_costs, *p_costs;

		  a_num = ALLOCNO_NUM (a);
		  if ((flag_ira_region == IRA_REGION_ALL
		       || flag_ira_region == IRA_REGION_MIXED)
		      && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
		      && (parent_a = parent->regno_allocno_map[i]) != NULL
		      /* There are no caps yet.  */
		      && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
				       (a)->border_allocnos,
				       ALLOCNO_NUM (a)))
		    {
		      /* Propagate costs to upper levels in the region
			 tree.  All additions saturate at INT_MAX.  */
		      parent_a_num = ALLOCNO_NUM (parent_a);
		      a_costs = COSTS (total_allocno_costs, a_num)->cost;
		      p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
		      for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			{
			  add_cost = a_costs[k];
			  if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
			    p_costs[k] = INT_MAX;
			  else
			    p_costs[k] += add_cost;
			}
		      add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
		      if (add_cost > 0
			  && (INT_MAX - add_cost
			      < COSTS (total_allocno_costs,
				       parent_a_num)->mem_cost))
			COSTS (total_allocno_costs, parent_a_num)->mem_cost
			  = INT_MAX;
		      else
			COSTS (total_allocno_costs, parent_a_num)->mem_cost
			  += add_cost;

		      /* Moveable pseudos must never be spilled.  */
		      if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
			COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
		    }
		  a_costs = COSTS (costs, a_num)->cost;
		  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
		    {
		      add_cost = a_costs[k];
		      if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
			i_costs[k] = INT_MAX;
		      else
			i_costs[k] += add_cost;
		    }
		  add_cost = COSTS (costs, a_num)->mem_cost;
		  if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
		    i_mem_cost = INT_MAX;
		  else
		    i_mem_cost += add_cost;
		}
	    }
	  /* Equivalence savings bias the memory cost: a negative
	     saving makes memory cost positive; a positive saving
	     makes memory free and penalizes every register class.  */
	  if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
	    i_mem_cost = 0;
	  else if (equiv_savings < 0)
	    i_mem_cost = -equiv_savings;
	  else if (equiv_savings > 0)
	    {
	      i_mem_cost = 0;
	      for (k = cost_classes_ptr->num - 1; k >= 0; k--)
		i_costs[k] += equiv_savings;
	    }

	  /* Start from an "infinite" cost, larger than any real
	     accumulated cost.  */
	  best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
	  best = ALL_REGS;
	  alt_class = NO_REGS;
	  /* Find best common class for all allocnos with the same
	     regno.  */
	  for (k = 0; k < cost_classes_ptr->num; k++)
	    {
	      rclass = cost_classes[k];
	      if (i_costs[k] < best_cost)
		{
		  best_cost = i_costs[k];
		  best = (enum reg_class) rclass;
		}
	      else if (i_costs[k] == best_cost)
		best = ira_reg_class_subunion[best][rclass];
	      if (pass == flag_expensive_optimizations
		  /* We still prefer registers to memory even at this
		     stage if their costs are the same.  We will make
		     a final decision during assigning hard registers
		     when we have all info including more accurate
		     costs which might be affected by assigning hard
		     registers to other pseudos because the pseudos
		     involved in moves can be coalesced.  */
		  && i_costs[k] <= i_mem_cost
		  && (reg_class_size[reg_class_subunion[alt_class][rclass]]
		      > reg_class_size[alt_class]))
		alt_class = reg_class_subunion[alt_class][rclass];
	    }
	  alt_class = ira_allocno_class_translate[alt_class];
	  if (best_cost > i_mem_cost
	      && ! non_spilled_static_chain_regno_p (i))
	    regno_aclass[i] = NO_REGS;
	  else if (!optimize && !targetm.class_likely_spilled_p (best))
	    /* Registers in the alternative class are likely to need
	       longer or slower sequences than registers in the best class.
	       When optimizing we make some effort to use the best class
	       over the alternative class where possible, but at -O0 we
	       effectively give the alternative class equal weight.
	       We then run the risk of using slower alternative registers
	       when plenty of registers from the best class are still free.
	       This is especially true because live ranges tend to be very
	       short in -O0 code and so register pressure tends to be low.

	       Avoid that by ignoring the alternative class if the best
	       class has plenty of registers.

	       The union class arrays give important classes and only
	       part of it are allocno classes.  So translate them into
	       allocno classes.  */
	    regno_aclass[i] = ira_allocno_class_translate[best];
	  else
	    {
	      /* Make the common class the biggest class of best and
		 alt_class.  Translate the common class into an
		 allocno class too.  */
	      regno_aclass[i] = (ira_allocno_class_translate
				 [ira_reg_class_superunion[best][alt_class]]);
	      ira_assert (regno_aclass[i] != NO_REGS
			  && ira_reg_allocno_class_p[regno_aclass[i]]);
	    }
	  /* Give the target a chance to override the chosen class.  */
	  if ((new_class
	       = (reg_class) (targetm.ira_change_pseudo_allocno_class
			      (i, regno_aclass[i], best))) != regno_aclass[i])
	    {
	      regno_aclass[i] = new_class;
	      if (hard_reg_set_subset_p (reg_class_contents[new_class],
					 reg_class_contents[best]))
		best = new_class;
	      if (hard_reg_set_subset_p (reg_class_contents[new_class],
					 reg_class_contents[alt_class]))
		alt_class = new_class;
	    }
	  if (pass == flag_expensive_optimizations)
	    {
	      if (best_cost > i_mem_cost
		  /* Do not assign NO_REGS to static chain pointer
		     pseudo when non-local goto is used.  */
		  && ! non_spilled_static_chain_regno_p (i))
		best = alt_class = NO_REGS;
	      else if (best == alt_class)
		alt_class = NO_REGS;
	      setup_reg_classes (i, best, alt_class, regno_aclass[i]);
	      if ((!allocno_p || internal_flag_ira_verbose > 2)
		  && dump_file != NULL)
		fprintf (dump_file,
			 "    r%d: preferred %s, alternative %s, allocno %s\n",
			 i, reg_class_names[best], reg_class_names[alt_class],
			 reg_class_names[regno_aclass[i]]);
	    }
	  regno_best_class[i] = best;
	  if (! allocno_p)
	    {
	      pref[i] = (best_cost > i_mem_cost
			 && ! non_spilled_static_chain_regno_p (i)
			 ? NO_REGS : best);
	      continue;
	    }
	  /* For each allocno of this regno, pick the best class that
	     is a subset of the regno's common allocno class.  */
	  for (a = ira_regno_allocno_map[i];
	       a != NULL;
	       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	    {
	      enum reg_class aclass = regno_aclass[i];
	      int a_num = ALLOCNO_NUM (a);
	      int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
	      int *a_costs = COSTS (costs, a_num)->cost;

	      if (aclass == NO_REGS)
		best = NO_REGS;
	      else
		{
		  /* Finding best class which is subset of the common
		     class.  */
		  best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
		  allocno_cost = best_cost;
		  best = ALL_REGS;
		  for (k = 0; k < cost_classes_ptr->num; k++)
		    {
		      rclass = cost_classes[k];
		      if (! ira_class_subset_p[rclass][aclass])
			continue;
		      if (total_a_costs[k] < best_cost)
			{
			  best_cost = total_a_costs[k];
			  allocno_cost = a_costs[k];
			  best = (enum reg_class) rclass;
			}
		      else if (total_a_costs[k] == best_cost)
			{
			  best = ira_reg_class_subunion[best][rclass];
			  allocno_cost = MAX (allocno_cost, a_costs[k]);
			}
		    }
		  ALLOCNO_CLASS_COST (a) = allocno_cost;
		}
	      if (internal_flag_ira_verbose > 2 && dump_file != NULL
		  && (pass == 0 || pref[a_num] != best))
		{
		  fprintf (dump_file, "    a%d (r%d,", a_num, i);
		  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
		    fprintf (dump_file, "b%d", bb->index);
		  else
		    fprintf (dump_file, "l%d",
			     ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
		  fprintf (dump_file, ") best %s, allocno %s\n",
			   reg_class_names[best],
			   reg_class_names[aclass]);
		}
	      pref[a_num] = best;
	      /* If the best class holds only one value of this mode,
		 record a preference for its first hard register.  */
	      if (pass == flag_expensive_optimizations && best != aclass
		  && ira_class_hard_regs_num[best] > 0
		  && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
		      >= ira_class_hard_regs_num[best]))
		{
		  int ind = cost_classes_ptr->index[aclass];

		  ira_assert (ind >= 0);
		  ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
		  ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
					(a_costs[ind] - ALLOCNO_CLASS_COST (a))
					/ (ira_register_move_cost
					   [ALLOCNO_MODE (a)][best][aclass]));
		  for (k = 0; k < cost_classes_ptr->num; k++)
		    if (ira_class_subset_p[cost_classes[k]][best])
		      a_costs[k] = a_costs[ind];
		}
	    }
	}
      if (internal_flag_ira_verbose > 4 && dump_file)
	{
	  if (allocno_p)
	    print_allocno_costs (dump_file);
	  else
	    print_pseudo_costs (dump_file);
	  fprintf (dump_file,"\n");
	}
    }
  ira_free (regno_best_class);
}
2016 /* Process moves involving hard regs to modify allocno hard register
2017 costs. We can do this only after determining allocno class. If a
2018 hard register forms a register class, then moves with the hard
2019 register are already taken into account in class costs for the
2020 allocno. */
2021 static void
2022 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
2024 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
2025 bool to_p;
2026 ira_allocno_t a, curr_a;
2027 ira_loop_tree_node_t curr_loop_tree_node;
2028 enum reg_class rclass;
2029 basic_block bb;
2030 rtx_insn *insn;
2031 rtx set, src, dst;
2033 bb = loop_tree_node->bb;
2034 if (bb == NULL)
2035 return;
2036 freq = REG_FREQ_FROM_BB (bb);
2037 if (freq == 0)
2038 freq = 1;
2039 FOR_BB_INSNS (bb, insn)
2041 if (!NONDEBUG_INSN_P (insn))
2042 continue;
2043 set = single_set (insn);
2044 if (set == NULL_RTX)
2045 continue;
2046 dst = SET_DEST (set);
2047 src = SET_SRC (set);
2048 if (! REG_P (dst) || ! REG_P (src))
2049 continue;
2050 dst_regno = REGNO (dst);
2051 src_regno = REGNO (src);
2052 if (dst_regno >= FIRST_PSEUDO_REGISTER
2053 && src_regno < FIRST_PSEUDO_REGISTER)
2055 hard_regno = src_regno;
2056 a = ira_curr_regno_allocno_map[dst_regno];
2057 to_p = true;
2059 else if (src_regno >= FIRST_PSEUDO_REGISTER
2060 && dst_regno < FIRST_PSEUDO_REGISTER)
2062 hard_regno = dst_regno;
2063 a = ira_curr_regno_allocno_map[src_regno];
2064 to_p = false;
2066 else
2067 continue;
2068 rclass = ALLOCNO_CLASS (a);
2069 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2070 continue;
2071 i = ira_class_hard_reg_index[rclass][hard_regno];
2072 if (i < 0)
2073 continue;
2074 a_regno = ALLOCNO_REGNO (a);
2075 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2076 curr_loop_tree_node != NULL;
2077 curr_loop_tree_node = curr_loop_tree_node->parent)
2078 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2079 ira_add_allocno_pref (curr_a, hard_regno, freq);
2081 int cost;
2082 enum reg_class hard_reg_class;
2083 machine_mode mode;
2085 mode = ALLOCNO_MODE (a);
2086 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2087 ira_init_register_move_cost_if_necessary (mode);
2088 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2089 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2090 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2091 ALLOCNO_CLASS_COST (a));
2092 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2093 rclass, 0);
2094 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2095 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2096 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2097 ALLOCNO_HARD_REG_COSTS (a)[i]);
2102 /* After we find hard register and memory costs for allocnos, define
2103 its class and modify hard register cost because insns moving
2104 allocno to/from hard registers. */
2105 static void
2106 setup_allocno_class_and_costs (void)
2108 int i, j, n, regno, hard_regno, num;
2109 int *reg_costs;
2110 enum reg_class aclass, rclass;
2111 ira_allocno_t a;
2112 ira_allocno_iterator ai;
2113 cost_classes_t cost_classes_ptr;
2115 ira_assert (allocno_p);
2116 FOR_EACH_ALLOCNO (a, ai)
2118 i = ALLOCNO_NUM (a);
2119 regno = ALLOCNO_REGNO (a);
2120 aclass = regno_aclass[regno];
2121 cost_classes_ptr = regno_cost_classes[regno];
2122 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2123 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2124 ira_set_allocno_class (a, aclass);
2125 if (aclass == NO_REGS)
2126 continue;
2127 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2129 n = ira_class_hard_regs_num[aclass];
2130 ALLOCNO_HARD_REG_COSTS (a)
2131 = reg_costs = ira_allocate_cost_vector (aclass);
2132 for (j = n - 1; j >= 0; j--)
2134 hard_regno = ira_class_hard_regs[aclass][j];
2135 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2136 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2137 else
2139 rclass = REGNO_REG_CLASS (hard_regno);
2140 num = cost_classes_ptr->index[rclass];
2141 if (num < 0)
2143 num = cost_classes_ptr->hard_regno_index[hard_regno];
2144 ira_assert (num >= 0);
2146 reg_costs[j] = COSTS (costs, i)->cost[num];
2151 if (optimize)
2152 ira_traverse_loop_tree (true, ira_loop_tree_root,
2153 process_bb_node_for_hard_reg_moves, NULL);
2158 /* Function called once during compiler work. */
2159 void
2160 ira_init_costs_once (void)
2162 int i;
2164 init_cost = NULL;
2165 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2167 op_costs[i] = NULL;
2168 this_op_costs[i] = NULL;
2170 temp_costs = NULL;
2173 /* Free allocated temporary cost vectors. */
2174 void
2175 target_ira_int::free_ira_costs ()
2177 int i;
2179 free (x_init_cost);
2180 x_init_cost = NULL;
2181 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2183 free (x_op_costs[i]);
2184 free (x_this_op_costs[i]);
2185 x_op_costs[i] = x_this_op_costs[i] = NULL;
2187 free (x_temp_costs);
2188 x_temp_costs = NULL;
2191 /* This is called each time register related information is
2192 changed. */
2193 void
2194 ira_init_costs (void)
2196 int i;
2198 this_target_ira_int->free_ira_costs ();
2199 max_struct_costs_size
2200 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2201 /* Don't use ira_allocate because vectors live through several IRA
2202 calls. */
2203 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2204 init_cost->mem_cost = 1000000;
2205 for (i = 0; i < ira_important_classes_num; i++)
2206 init_cost->cost[i] = 1000000;
2207 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2209 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2210 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2212 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2217 /* Common initialization function for ira_costs and
2218 ira_set_pseudo_classes. */
2219 static void
2220 init_costs (void)
2222 init_subregs_of_mode ();
2223 costs = (struct costs *) ira_allocate (max_struct_costs_size
2224 * cost_elements_num);
2225 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2226 * cost_elements_num);
2227 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2228 * max_reg_num ());
2229 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2230 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2233 /* Common finalization function for ira_costs and
2234 ira_set_pseudo_classes. */
2235 static void
2236 finish_costs (void)
2238 finish_subregs_of_mode ();
2239 ira_free (regno_equiv_gains);
2240 ira_free (regno_aclass);
2241 ira_free (pref_buffer);
2242 ira_free (costs);
2245 /* Entry function which defines register class, memory and hard
2246 register costs for each allocno. */
2247 void
2248 ira_costs (void)
2250 allocno_p = true;
2251 cost_elements_num = ira_allocnos_num;
2252 init_costs ();
2253 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2254 * ira_allocnos_num);
2255 initiate_regno_cost_classes ();
2256 calculate_elim_costs_all_insns ();
2257 find_costs_and_classes (ira_dump_file);
2258 setup_allocno_class_and_costs ();
2259 finish_regno_cost_classes ();
2260 finish_costs ();
2261 ira_free (total_allocno_costs);
2264 /* Entry function which defines classes for pseudos.
2265 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2266 void
2267 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2269 allocno_p = false;
2270 internal_flag_ira_verbose = flag_ira_verbose;
2271 cost_elements_num = max_reg_num ();
2272 init_costs ();
2273 initiate_regno_cost_classes ();
2274 find_costs_and_classes (dump_file);
2275 finish_regno_cost_classes ();
2276 if (define_pseudo_classes)
2277 pseudo_classes_defined_p = true;
2279 finish_costs ();
2284 /* Change hard register costs for allocnos which lives through
2285 function calls. This is called only when we found all intersected
2286 calls during building allocno live ranges. */
2287 void
2288 ira_tune_allocno_costs (void)
2290 int j, n, regno;
2291 int cost, min_cost, *reg_costs;
2292 enum reg_class aclass, rclass;
2293 machine_mode mode;
2294 ira_allocno_t a;
2295 ira_allocno_iterator ai;
2296 ira_allocno_object_iterator oi;
2297 ira_object_t obj;
2298 bool skip_p;
2299 HARD_REG_SET *crossed_calls_clobber_regs;
2301 FOR_EACH_ALLOCNO (a, ai)
2303 aclass = ALLOCNO_CLASS (a);
2304 if (aclass == NO_REGS)
2305 continue;
2306 mode = ALLOCNO_MODE (a);
2307 n = ira_class_hard_regs_num[aclass];
2308 min_cost = INT_MAX;
2309 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2310 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2312 ira_allocate_and_set_costs
2313 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2314 ALLOCNO_CLASS_COST (a));
2315 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2316 for (j = n - 1; j >= 0; j--)
2318 regno = ira_class_hard_regs[aclass][j];
2319 skip_p = false;
2320 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2322 if (ira_hard_reg_set_intersection_p (regno, mode,
2323 OBJECT_CONFLICT_HARD_REGS
2324 (obj)))
2326 skip_p = true;
2327 break;
2330 if (skip_p)
2331 continue;
2332 rclass = REGNO_REG_CLASS (regno);
2333 cost = 0;
2334 crossed_calls_clobber_regs
2335 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2336 if (ira_hard_reg_set_intersection_p (regno, mode,
2337 *crossed_calls_clobber_regs)
2338 && (ira_hard_reg_set_intersection_p (regno, mode,
2339 call_used_reg_set)
2340 || targetm.hard_regno_call_part_clobbered (regno,
2341 mode)))
2342 cost += (ALLOCNO_CALL_FREQ (a)
2343 * (ira_memory_move_cost[mode][rclass][0]
2344 + ira_memory_move_cost[mode][rclass][1]));
2345 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2346 cost += ((ira_memory_move_cost[mode][rclass][0]
2347 + ira_memory_move_cost[mode][rclass][1])
2348 * ALLOCNO_FREQ (a)
2349 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2350 #endif
2351 if (INT_MAX - cost < reg_costs[j])
2352 reg_costs[j] = INT_MAX;
2353 else
2354 reg_costs[j] += cost;
2355 if (min_cost > reg_costs[j])
2356 min_cost = reg_costs[j];
2359 if (min_cost != INT_MAX)
2360 ALLOCNO_CLASS_COST (a) = min_cost;
2362 /* Some targets allow pseudos to be allocated to unaligned sequences
2363 of hard registers. However, selecting an unaligned sequence can
2364 unnecessarily restrict later allocations. So increase the cost of
2365 unaligned hard regs to encourage the use of aligned hard regs. */
2367 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2369 if (nregs > 1)
2371 ira_allocate_and_set_costs
2372 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2373 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2374 for (j = n - 1; j >= 0; j--)
2376 regno = ira_non_ordered_class_hard_regs[aclass][j];
2377 if ((regno % nregs) != 0)
2379 int index = ira_class_hard_reg_index[aclass][regno];
2380 ira_assert (index != -1);
2381 reg_costs[index] += ALLOCNO_FREQ (a);
2389 /* Add COST to the estimated gain for eliminating REGNO with its
2390 equivalence. If COST is zero, record that no such elimination is
2391 possible. */
2393 void
2394 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2396 if (cost == 0)
2397 regno_equiv_gains[regno] = 0;
2398 else
2399 regno_equiv_gains[regno] += cost;
2402 void
2403 ira_costs_c_finalize (void)
2405 this_target_ira_int->free_ira_costs ();