1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2016 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "tm_p.h"
30 #include "insn-config.h"
31 #include "regs.h"
32 #include "ira.h"
33 #include "ira-int.h"
34 #include "addresses.h"
35 #include "reload.h"
37 /* The flag is set up every time we calculate pseudo register
38 classes through function ira_set_pseudo_classes. */
39 static bool pseudo_classes_defined_p = false;
41 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
42 static bool allocno_p;
44 /* Number of elements in array `costs'. */
45 static int cost_elements_num;
47 /* The `costs' struct records the cost of using hard registers of each
48 class considered for the calculation and of using memory for each
49 allocno or pseudo. */
50 struct costs
52 int mem_cost;
53 /* Costs for register classes start here. We process only some
54 allocno classes. */
55 int cost[1];
58 #define max_struct_costs_size \
59 (this_target_ira_int->x_max_struct_costs_size)
60 #define init_cost \
61 (this_target_ira_int->x_init_cost)
62 #define temp_costs \
63 (this_target_ira_int->x_temp_costs)
64 #define op_costs \
65 (this_target_ira_int->x_op_costs)
66 #define this_op_costs \
67 (this_target_ira_int->x_this_op_costs)
69 /* Costs of each class for each allocno or pseudo. */
70 static struct costs *costs;
72 /* Accumulated costs of each class for each allocno. */
73 static struct costs *total_allocno_costs;
75 /* The current size of struct costs. */
76 static int struct_costs_size;
78 /* Return pointer to structure containing costs of allocno or pseudo
79 with given NUM in array ARR. */
80 #define COSTS(arr, num) \
81 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
83 /* Return index in COSTS when processing reg with REGNO. */
84 #define COST_INDEX(regno) (allocno_p \
85 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
86 : (int) regno)
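/* Note: struct costs ends in a nominally one-element cost[] array; the
   real size of each element is the run-time struct_costs_size, so the
   elements cannot be indexed as an ordinary C array and COSTS does the
   byte arithmetic instead.  A typical use below is
   COSTS (costs, COST_INDEX (regno))->mem_cost, which works both when we
   process allocnos and when we process pseudos.  */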
88 /* Record register class preferences of each allocno or pseudo. Null
89 value means no preferences. It happens on the 1st iteration of the
90 cost calculation. */
91 static enum reg_class *pref;
93 /* Allocated buffer for pref. */
94 static enum reg_class *pref_buffer;
96 /* Record allocno class of each allocno with the same regno. */
97 static enum reg_class *regno_aclass;
99 /* Record cost gains for not allocating a register with an invariant
100 equivalence. */
101 static int *regno_equiv_gains;
103 /* Execution frequency of the current insn. */
104 static int frequency;
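/* The costs recorded below are scaled by this frequency, which is taken
   from REG_FREQ_FROM_BB of the basic block containing the insn being
   scanned (see process_bb_for_costs).  */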
108 /* Info about reg classes whose costs are calculated for a pseudo. */
109 struct cost_classes
111 /* Number of the cost classes in the subsequent array. */
112 int num;
113 /* Container of the cost classes. */
114 enum reg_class classes[N_REG_CLASSES];
115 /* Map reg class -> index of the reg class in the previous array.
116 -1 if it is not a cost class. */
117 int index[N_REG_CLASSES];
118 /* Map hard regno -> index of the first class in array CLASSES
119 containing the hard regno, -1 otherwise. */
120 int hard_regno_index[FIRST_PSEUDO_REGISTER];
123 /* Types of pointers to the structure above. */
124 typedef struct cost_classes *cost_classes_t;
125 typedef const struct cost_classes *const_cost_classes_t;
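/* As an illustration, using made-up classes for a hypothetical target:
   if a pseudo's cost classes were GENERAL_REGS and FLOAT_REGS, then
   num == 2, classes[] == { GENERAL_REGS, FLOAT_REGS },
   index[GENERAL_REGS] == 0, index[FLOAT_REGS] == 1, every other index[]
   entry is -1, and hard_regno_index[] gives, for each hard register,
   the index of the first of these classes containing it, or -1.  */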
127 /* Info about cost classes for each pseudo. */
128 static cost_classes_t *regno_cost_classes;
130 /* Helper for cost_classes hashing. */
132 struct cost_classes_hasher : pointer_hash <cost_classes>
134 static inline hashval_t hash (const cost_classes *);
135 static inline bool equal (const cost_classes *, const cost_classes *);
136 static inline void remove (cost_classes *);
139 /* Returns hash value for cost classes info HV. */
140 inline hashval_t
141 cost_classes_hasher::hash (const cost_classes *hv)
143 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
146 /* Compares cost classes info HV1 and HV2. */
147 inline bool
148 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 return (hv1->num == hv2->num
151 && memcmp (hv1->classes, hv2->classes,
152 sizeof (enum reg_class) * hv1->num) == 0);
155 /* Delete cost classes info V from the hash table. */
156 inline void
157 cost_classes_hasher::remove (cost_classes *v)
159 ira_free (v);
162 /* Hash table of unique cost classes. */
163 static hash_table<cost_classes_hasher> *cost_classes_htab;
165 /* Map allocno class -> cost classes for pseudo of given allocno
166 class. */
167 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
169 /* Map mode -> cost classes for pseudo of given mode. */
170 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
172 /* Cost classes that include all classes in ira_important_classes. */
173 static cost_classes all_cost_classes;
175 /* Use the array of classes in CLASSES_PTR to fill out the rest of
176 the structure. */
177 static void
178 complete_cost_classes (cost_classes_t classes_ptr)
180 for (int i = 0; i < N_REG_CLASSES; i++)
181 classes_ptr->index[i] = -1;
182 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
183 classes_ptr->hard_regno_index[i] = -1;
184 for (int i = 0; i < classes_ptr->num; i++)
186 enum reg_class cl = classes_ptr->classes[i];
187 classes_ptr->index[cl] = i;
188 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 unsigned int hard_regno = ira_class_hard_regs[cl][j];
191 if (classes_ptr->hard_regno_index[hard_regno] < 0)
192 classes_ptr->hard_regno_index[hard_regno] = i;
197 /* Initialize info about the cost classes for each pseudo. */
198 static void
199 initiate_regno_cost_classes (void)
201 int size = sizeof (cost_classes_t) * max_reg_num ();
203 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
204 memset (regno_cost_classes, 0, size);
205 memset (cost_classes_aclass_cache, 0,
206 sizeof (cost_classes_t) * N_REG_CLASSES);
207 memset (cost_classes_mode_cache, 0,
208 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
209 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
210 all_cost_classes.num = ira_important_classes_num;
211 for (int i = 0; i < ira_important_classes_num; i++)
212 all_cost_classes.classes[i] = ira_important_classes[i];
213 complete_cost_classes (&all_cost_classes);
216 /* Create new cost classes from cost classes FROM and set up members
217 index and hard_regno_index. Return the new classes. The function
218 implements some common code of two functions
219 setup_regno_cost_classes_by_aclass and
220 setup_regno_cost_classes_by_mode. */
221 static cost_classes_t
222 setup_cost_classes (cost_classes_t from)
224 cost_classes_t classes_ptr;
226 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
227 classes_ptr->num = from->num;
228 for (int i = 0; i < from->num; i++)
229 classes_ptr->classes[i] = from->classes[i];
230 complete_cost_classes (classes_ptr);
231 return classes_ptr;
234 /* Return a version of FULL that only considers registers in REGS that are
235 valid for mode MODE. Both FULL and the returned class are globally
236 allocated. */
237 static cost_classes_t
238 restrict_cost_classes (cost_classes_t full, machine_mode mode,
239 const HARD_REG_SET &regs)
241 static struct cost_classes narrow;
242 int map[N_REG_CLASSES];
243 narrow.num = 0;
244 for (int i = 0; i < full->num; i++)
246 /* Assume that we'll drop the class. */
247 map[i] = -1;
249 /* Ignore classes that are too small for the mode. */
250 enum reg_class cl = full->classes[i];
251 if (!contains_reg_of_mode[cl][mode])
252 continue;
254 /* Calculate the set of registers in CL that belong to REGS and
255 are valid for MODE. */
256 HARD_REG_SET valid_for_cl;
257 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
258 AND_HARD_REG_SET (valid_for_cl, regs);
259 AND_COMPL_HARD_REG_SET (valid_for_cl,
260 ira_prohibited_class_mode_regs[cl][mode]);
261 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
262 if (hard_reg_set_empty_p (valid_for_cl))
263 continue;
265 /* Don't use this class if the set of valid registers is a subset
266 of an existing class. For example, suppose we have two classes
267 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
268 that the mode changes allowed by FR_REGS are not as general as
269 the mode changes allowed by GR_REGS.
271 In this situation, the mode changes for GR_AND_FR_REGS could
272 either be seen as the union or the intersection of the mode
273 changes allowed by the two subclasses. The justification for
274 the union-based definition would be that, if you want a mode
275 change that's only allowed by GR_REGS, you can pick a register
276 from the GR_REGS subclass. The justification for the
277 intersection-based definition would be that every register
278 from the class would allow the mode change.
280 However, if we have a register that needs to be in GR_REGS,
281 using GR_AND_FR_REGS with the intersection-based definition
282 would be too pessimistic, since it would bring in restrictions
283 that only apply to FR_REGS. Conversely, if we have a register
284 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
285 union-based definition would lose the extra restrictions
286 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
287 for cases where GR_REGS and FR_REGS are both valid. */
288 int pos;
289 for (pos = 0; pos < narrow.num; ++pos)
291 enum reg_class cl2 = narrow.classes[pos];
292 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
293 break;
295 map[i] = pos;
296 if (pos == narrow.num)
298 /* If several classes are equivalent, prefer to use the one
299 that was chosen as the allocno class. */
300 enum reg_class cl2 = ira_allocno_class_translate[cl];
301 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
302 cl = cl2;
303 narrow.classes[narrow.num++] = cl;
306 if (narrow.num == full->num)
307 return full;
309 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
310 if (*slot == NULL)
312 cost_classes_t classes = setup_cost_classes (&narrow);
313 /* Map equivalent classes to the representative that we chose above. */
314 for (int i = 0; i < ira_important_classes_num; i++)
316 enum reg_class cl = ira_important_classes[i];
317 int index = full->index[cl];
318 if (index >= 0)
319 classes->index[cl] = map[index];
321 *slot = classes;
323 return *slot;
326 /* Set up cost classes for pseudo REGNO whose allocno class is ACLASS.
327 This function is used when we already know an initial approximation of
328 the allocno class of the pseudo, e.g. on the second iteration
329 of class cost calculation or after class cost calculation in
330 register-pressure sensitive insn scheduling or register-pressure
331 sensitive loop-invariant motion. */
332 static void
333 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
335 static struct cost_classes classes;
336 cost_classes_t classes_ptr;
337 enum reg_class cl;
338 int i;
339 cost_classes **slot;
340 HARD_REG_SET temp, temp2;
341 bool exclude_p;
343 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
345 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
346 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
347 /* We exclude classes from consideration which are subsets of
348 ACLASS only if ACLASS is a uniform class. */
349 exclude_p = ira_uniform_class_p[aclass];
350 classes.num = 0;
351 for (i = 0; i < ira_important_classes_num; i++)
353 cl = ira_important_classes[i];
354 if (exclude_p)
356 /* Exclude non-uniform classes which are subsets of
357 ACLASS. */
358 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
359 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
360 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
361 continue;
363 classes.classes[classes.num++] = cl;
365 slot = cost_classes_htab->find_slot (&classes, INSERT);
366 if (*slot == NULL)
368 classes_ptr = setup_cost_classes (&classes);
369 *slot = classes_ptr;
371 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
373 if (regno_reg_rtx[regno] != NULL_RTX)
375 /* Restrict the classes to those that are valid for REGNO's mode
376 (which might for example exclude singleton classes if the mode
377 requires two registers). Also restrict the classes to those that
378 are valid for subregs of REGNO. */
379 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
380 if (!valid_regs)
381 valid_regs = &reg_class_contents[ALL_REGS];
382 classes_ptr = restrict_cost_classes (classes_ptr,
383 PSEUDO_REGNO_MODE (regno),
384 *valid_regs);
386 regno_cost_classes[regno] = classes_ptr;
389 /* Set up cost classes for pseudo REGNO with MODE. Using MODE can
390 decrease the number of cost classes for the pseudo if hard registers
391 of some important classes cannot hold a value of MODE. In that case
392 the pseudo cannot get a hard register of those important classes, and
393 cost calculation for them would only waste CPU
394 time. */
395 static void
396 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
398 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
399 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
400 mode, *valid_regs);
401 else
403 if (cost_classes_mode_cache[mode] == NULL)
404 cost_classes_mode_cache[mode]
405 = restrict_cost_classes (&all_cost_classes, mode,
406 reg_class_contents[ALL_REGS]);
407 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
411 /* Finalize info about the cost classes for each pseudo. */
412 static void
413 finish_regno_cost_classes (void)
415 ira_free (regno_cost_classes);
416 delete cost_classes_htab;
417 cost_classes_htab = NULL;
422 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
423 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
424 be a pseudo register. */
425 static int
426 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
427 secondary_reload_info *prev_sri)
429 secondary_reload_info sri;
430 reg_class_t secondary_class = NO_REGS;
432 /* If X is a SCRATCH, there is actually nothing to move since we are
433 assuming optimal allocation. */
434 if (GET_CODE (x) == SCRATCH)
435 return 0;
437 /* Get the class we will actually use for a reload. */
438 rclass = targetm.preferred_reload_class (x, rclass);
440 /* If we need a secondary reload for an intermediate, the cost is
441 that to load the input into the intermediate register, then to
442 copy it. */
443 sri.prev_sri = prev_sri;
444 sri.extra_cost = 0;
445 /* PR 68770: Secondary reload might examine the t_icode field. */
446 sri.t_icode = CODE_FOR_nothing;
448 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
450 if (secondary_class != NO_REGS)
452 ira_init_register_move_cost_if_necessary (mode);
453 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
454 + sri.extra_cost
455 + copy_cost (x, mode, secondary_class, to_p, &sri));
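/* I.e. the total cost with a secondary reload is the cost of the
   secondary_class -> rclass move, any extra cost the target reported
   in SRI, and, recursively, the cost of getting X into
   SECONDARY_CLASS in the first place.  */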
458 /* For memory, use the memory move cost, for (hard) registers, use
459 the cost to move between the register classes, and use
460 COSTS_N_INSNS (1) for everything else (constants). */
461 if (MEM_P (x) || rclass == NO_REGS)
462 return sri.extra_cost
463 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
464 else if (REG_P (x))
466 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
468 ira_init_register_move_cost_if_necessary (mode);
469 return (sri.extra_cost
470 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
472 else
473 /* If this is a constant, we may eventually want to call rtx_cost
474 here. */
475 return sri.extra_cost + COSTS_N_INSNS (1);
480 /* Record the cost of using memory or hard registers of various
481 classes for the operands in INSN.
483 N_ALTS is the number of alternatives.
484 N_OPS is the number of operands.
485 OPS is an array of the operands.
486 MODES are the modes of the operands, in case any are VOIDmode.
487 CONSTRAINTS are the constraints to use for the operands. This array
488 is modified by this procedure.
490 This procedure works alternative by alternative. For each
491 alternative we assume that we will be able to allocate all allocnos
492 to their ideal register class and calculate the cost of using that
493 alternative. Then we compute, for each operand that is a
494 pseudo-register, the cost of having the allocno allocated to each
495 register class and using it in that alternative. To this cost is
496 added the cost of the alternative.
498 The cost of each class for this insn is its lowest cost among all
499 the alternatives. */
500 static void
501 record_reg_classes (int n_alts, int n_ops, rtx *ops,
502 machine_mode *modes, const char **constraints,
503 rtx_insn *insn, enum reg_class *pref)
505 int alt;
506 int i, j, k;
507 int insn_allows_mem[MAX_RECOG_OPERANDS];
508 move_table *move_in_cost, *move_out_cost;
509 short (*mem_cost)[2];
511 for (i = 0; i < n_ops; i++)
512 insn_allows_mem[i] = 0;
514 /* Process each alternative, each time minimizing an operand's cost
515 with the cost for each operand in that alternative. */
516 alternative_mask preferred = get_preferred_alternatives (insn);
517 for (alt = 0; alt < n_alts; alt++)
519 enum reg_class classes[MAX_RECOG_OPERANDS];
520 int allows_mem[MAX_RECOG_OPERANDS];
521 enum reg_class rclass;
522 int alt_fail = 0;
523 int alt_cost = 0, op_cost_add;
525 if (!TEST_BIT (preferred, alt))
527 for (i = 0; i < recog_data.n_operands; i++)
528 constraints[i] = skip_alternative (constraints[i]);
530 continue;
533 for (i = 0; i < n_ops; i++)
535 unsigned char c;
536 const char *p = constraints[i];
537 rtx op = ops[i];
538 machine_mode mode = modes[i];
539 int allows_addr = 0;
540 int win = 0;
542 /* Initially show we know nothing about the register class. */
543 classes[i] = NO_REGS;
544 allows_mem[i] = 0;
546 /* If this operand has no constraints at all, we can
547 conclude nothing about it since anything is valid. */
548 if (*p == 0)
550 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
551 memset (this_op_costs[i], 0, struct_costs_size);
552 continue;
555 /* If this alternative is only relevant when this operand
556 matches a previous operand, we do different things
557 depending on whether this operand is an allocno-reg or not.
558 We must process any modifiers for the operand before we
559 can make this test. */
560 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
561 p++;
563 if (p[0] >= '0' && p[0] <= '0' + i)
565 /* Copy class and whether memory is allowed from the
566 matching alternative. Then perform any needed cost
567 computations and/or adjustments. */
568 j = p[0] - '0';
569 classes[i] = classes[j];
570 allows_mem[i] = allows_mem[j];
571 if (allows_mem[i])
572 insn_allows_mem[i] = 1;
574 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
576 /* If this matches the other operand, we have no
577 added cost and we win. */
578 if (rtx_equal_p (ops[j], op))
579 win = 1;
580 /* If we can put the other operand into a register,
581 add to the cost of this alternative the cost to
582 copy this operand to the register used for the
583 other operand. */
584 else if (classes[j] != NO_REGS)
586 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
587 win = 1;
590 else if (! REG_P (ops[j])
591 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
593 /* This op is an allocno but the one it matches is
594 not. */
596 /* If we can't put the other operand into a
597 register, this alternative can't be used. */
599 if (classes[j] == NO_REGS)
600 alt_fail = 1;
601 /* Otherwise, add to the cost of this alternative
602 the cost to copy the other operand to the hard
603 register used for this operand. */
604 else
605 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
607 else
609 /* The costs of this operand are not the same as the
610 other operand since move costs are not symmetric.
611 Moreover, if we cannot tie them, this alternative
612 needs to do a copy, which is one insn. */
613 struct costs *pp = this_op_costs[i];
614 int *pp_costs = pp->cost;
615 cost_classes_t cost_classes_ptr
616 = regno_cost_classes[REGNO (op)];
617 enum reg_class *cost_classes = cost_classes_ptr->classes;
618 bool in_p = recog_data.operand_type[i] != OP_OUT;
619 bool out_p = recog_data.operand_type[i] != OP_IN;
620 enum reg_class op_class = classes[i];
622 ira_init_register_move_cost_if_necessary (mode);
623 if (! in_p)
625 ira_assert (out_p);
626 if (op_class == NO_REGS)
628 mem_cost = ira_memory_move_cost[mode];
629 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
631 rclass = cost_classes[k];
632 pp_costs[k] = mem_cost[rclass][0] * frequency;
635 else
637 move_out_cost = ira_may_move_out_cost[mode];
638 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
640 rclass = cost_classes[k];
641 pp_costs[k]
642 = move_out_cost[op_class][rclass] * frequency;
646 else if (! out_p)
648 ira_assert (in_p);
649 if (op_class == NO_REGS)
651 mem_cost = ira_memory_move_cost[mode];
652 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
654 rclass = cost_classes[k];
655 pp_costs[k] = mem_cost[rclass][1] * frequency;
658 else
660 move_in_cost = ira_may_move_in_cost[mode];
661 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
663 rclass = cost_classes[k];
664 pp_costs[k]
665 = move_in_cost[rclass][op_class] * frequency;
669 else
671 if (op_class == NO_REGS)
673 mem_cost = ira_memory_move_cost[mode];
674 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
676 rclass = cost_classes[k];
677 pp_costs[k] = ((mem_cost[rclass][0]
678 + mem_cost[rclass][1])
679 * frequency);
682 else
684 move_in_cost = ira_may_move_in_cost[mode];
685 move_out_cost = ira_may_move_out_cost[mode];
686 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
688 rclass = cost_classes[k];
689 pp_costs[k] = ((move_in_cost[rclass][op_class]
690 + move_out_cost[op_class][rclass])
691 * frequency);
696 /* If the alternative actually allows memory, make
697 things a bit cheaper since we won't need an extra
698 insn to load it. */
699 pp->mem_cost
700 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
701 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
702 - allows_mem[i]) * frequency;
704 /* If we have assigned a class to this allocno in
705 our first pass, add a cost to this alternative
706 corresponding to what we would add if this
707 allocno were not in the appropriate class. */
708 if (pref)
710 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
712 if (pref_class == NO_REGS)
713 alt_cost
714 += ((out_p
715 ? ira_memory_move_cost[mode][op_class][0] : 0)
716 + (in_p
717 ? ira_memory_move_cost[mode][op_class][1]
718 : 0));
719 else if (ira_reg_class_intersect
720 [pref_class][op_class] == NO_REGS)
721 alt_cost
722 += ira_register_move_cost[mode][pref_class][op_class];
724 if (REGNO (ops[i]) != REGNO (ops[j])
725 && ! find_reg_note (insn, REG_DEAD, op))
726 alt_cost += 2;
728 p++;
732 /* Scan all the constraint letters. See if the operand
733 matches any of the constraints. Collect the valid
734 register classes and see if this operand accepts
735 memory. */
736 while ((c = *p))
738 switch (c)
740 case '*':
741 /* Ignore the next letter for this pass. */
742 c = *++p;
743 break;
745 case '^':
746 alt_cost += 2;
747 break;
749 case '?':
750 alt_cost += 2;
751 break;
753 case 'g':
754 if (MEM_P (op)
755 || (CONSTANT_P (op)
756 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
757 win = 1;
758 insn_allows_mem[i] = allows_mem[i] = 1;
759 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
760 break;
762 default:
763 enum constraint_num cn = lookup_constraint (p);
764 enum reg_class cl;
765 switch (get_constraint_type (cn))
767 case CT_REGISTER:
768 cl = reg_class_for_constraint (cn);
769 if (cl != NO_REGS)
770 classes[i] = ira_reg_class_subunion[classes[i]][cl];
771 break;
773 case CT_CONST_INT:
774 if (CONST_INT_P (op)
775 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
776 win = 1;
777 break;
779 case CT_MEMORY:
780 /* Every MEM can be reloaded to fit. */
781 insn_allows_mem[i] = allows_mem[i] = 1;
782 if (MEM_P (op))
783 win = 1;
784 break;
786 case CT_SPECIAL_MEMORY:
787 insn_allows_mem[i] = allows_mem[i] = 1;
788 if (MEM_P (op) && constraint_satisfied_p (op, cn))
789 win = 1;
790 break;
792 case CT_ADDRESS:
793 /* Every address can be reloaded to fit. */
794 allows_addr = 1;
795 if (address_operand (op, GET_MODE (op))
796 || constraint_satisfied_p (op, cn))
797 win = 1;
798 /* We know this operand is an address, so we
799 want it to be allocated to a hard register
800 that can be the base of an address,
801 i.e. BASE_REG_CLASS. */
802 classes[i]
803 = ira_reg_class_subunion[classes[i]]
804 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
805 ADDRESS, SCRATCH)];
806 break;
808 case CT_FIXED_FORM:
809 if (constraint_satisfied_p (op, cn))
810 win = 1;
811 break;
813 break;
815 p += CONSTRAINT_LEN (c, p);
816 if (c == ',')
817 break;
820 constraints[i] = p;
822 /* How we account for this operand now depends on whether it
823 is a pseudo register or not. If it is, we first check if
824 any register classes are valid. If not, we ignore this
825 alternative, since we want to assume that all allocnos get
826 allocated for register preferencing. If some register
827 class is valid, compute the costs of moving the allocno
828 into that class. */
829 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
831 if (classes[i] == NO_REGS && ! allows_mem[i])
833 /* We must always fail if the operand is a REG, but
834 we did not find a suitable class and memory is
835 not allowed.
837 Otherwise we may perform an uninitialized read
838 from this_op_costs after the `continue' statement
839 below. */
840 alt_fail = 1;
842 else
844 unsigned int regno = REGNO (op);
845 struct costs *pp = this_op_costs[i];
846 int *pp_costs = pp->cost;
847 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
848 enum reg_class *cost_classes = cost_classes_ptr->classes;
849 bool in_p = recog_data.operand_type[i] != OP_OUT;
850 bool out_p = recog_data.operand_type[i] != OP_IN;
851 enum reg_class op_class = classes[i];
853 ira_init_register_move_cost_if_necessary (mode);
854 if (! in_p)
856 ira_assert (out_p);
857 if (op_class == NO_REGS)
859 mem_cost = ira_memory_move_cost[mode];
860 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
862 rclass = cost_classes[k];
863 pp_costs[k] = mem_cost[rclass][0] * frequency;
866 else
868 move_out_cost = ira_may_move_out_cost[mode];
869 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
871 rclass = cost_classes[k];
872 pp_costs[k]
873 = move_out_cost[op_class][rclass] * frequency;
877 else if (! out_p)
879 ira_assert (in_p);
880 if (op_class == NO_REGS)
882 mem_cost = ira_memory_move_cost[mode];
883 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
885 rclass = cost_classes[k];
886 pp_costs[k] = mem_cost[rclass][1] * frequency;
889 else
891 move_in_cost = ira_may_move_in_cost[mode];
892 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
894 rclass = cost_classes[k];
895 pp_costs[k]
896 = move_in_cost[rclass][op_class] * frequency;
900 else
902 if (op_class == NO_REGS)
904 mem_cost = ira_memory_move_cost[mode];
905 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
907 rclass = cost_classes[k];
908 pp_costs[k] = ((mem_cost[rclass][0]
909 + mem_cost[rclass][1])
910 * frequency);
913 else
915 move_in_cost = ira_may_move_in_cost[mode];
916 move_out_cost = ira_may_move_out_cost[mode];
917 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
919 rclass = cost_classes[k];
920 pp_costs[k] = ((move_in_cost[rclass][op_class]
921 + move_out_cost[op_class][rclass])
922 * frequency);
927 if (op_class == NO_REGS)
928 /* Although we don't need an insn to reload from
929 memory, accessing memory is still usually more
930 expensive than using a register. */
931 pp->mem_cost = frequency;
932 else
933 /* If the alternative actually allows memory, make
934 things a bit cheaper since we won't need an
935 extra insn to load it. */
936 pp->mem_cost
937 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
938 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
939 - allows_mem[i]) * frequency;
940 /* If we have assigned a class to this allocno in
941 our first pass, add a cost to this alternative
942 corresponding to what we would add if this
943 allocno were not in the appropriate class. */
944 if (pref)
946 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
948 if (pref_class == NO_REGS)
950 if (op_class != NO_REGS)
951 alt_cost
952 += ((out_p
953 ? ira_memory_move_cost[mode][op_class][0]
954 : 0)
955 + (in_p
956 ? ira_memory_move_cost[mode][op_class][1]
957 : 0));
959 else if (op_class == NO_REGS)
960 alt_cost
961 += ((out_p
962 ? ira_memory_move_cost[mode][pref_class][1]
963 : 0)
964 + (in_p
965 ? ira_memory_move_cost[mode][pref_class][0]
966 : 0));
967 else if (ira_reg_class_intersect[pref_class][op_class]
968 == NO_REGS)
969 alt_cost += (ira_register_move_cost
970 [mode][pref_class][op_class]);
975 /* Otherwise, if this alternative wins, either because we
976 have already determined that or if we have a hard
977 register of the proper class, there is no cost for this
978 alternative. */
979 else if (win || (REG_P (op)
980 && reg_fits_class_p (op, classes[i],
981 0, GET_MODE (op))))
984 /* If registers are valid, the cost of this alternative
985 includes copying the object to and/or from a
986 register. */
987 else if (classes[i] != NO_REGS)
989 if (recog_data.operand_type[i] != OP_OUT)
990 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
992 if (recog_data.operand_type[i] != OP_IN)
993 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
995 /* The only other way this alternative can be used is if
996 this is a constant that could be placed into memory. */
997 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
998 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
999 else
1000 alt_fail = 1;
1003 if (alt_fail)
1004 continue;
1006 op_cost_add = alt_cost * frequency;
1007 /* Finally, update the costs with the information we've
1008 calculated about this alternative. */
1009 for (i = 0; i < n_ops; i++)
1010 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1012 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1013 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1014 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
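/* An in/out operand is both read and written, so its reload cost would
   be paid twice; scale its recorded costs accordingly.  */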
1015 cost_classes_t cost_classes_ptr
1016 = regno_cost_classes[REGNO (ops[i])];
1018 pp->mem_cost = MIN (pp->mem_cost,
1019 (qq->mem_cost + op_cost_add) * scale);
1021 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1022 pp_costs[k]
1023 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1027 if (allocno_p)
1028 for (i = 0; i < n_ops; i++)
1030 ira_allocno_t a;
1031 rtx op = ops[i];
1033 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1034 continue;
1035 a = ira_curr_regno_allocno_map [REGNO (op)];
1036 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1037 ALLOCNO_BAD_SPILL_P (a) = true;
1044 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1045 static inline bool
1046 ok_for_index_p_nonstrict (rtx reg)
1048 unsigned regno = REGNO (reg);
1050 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1053 /* A version of regno_ok_for_base_p for use here, when all
1054 pseudo-registers should count as OK. Arguments as for
1055 regno_ok_for_base_p. */
1056 static inline bool
1057 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1058 enum rtx_code outer_code, enum rtx_code index_code)
1060 unsigned regno = REGNO (reg);
1062 if (regno >= FIRST_PSEUDO_REGISTER)
1063 return true;
1064 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1067 /* Record the pseudo registers we must reload into hard registers in a
1068 subexpression of a memory address, X.
1070 If CONTEXT is 0, we are looking at the base part of an address,
1071 otherwise we are looking at the index part.
1073 MODE and AS are the mode and address space of the memory reference;
1074 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1075 These four arguments are passed down to base_reg_class.
1077 SCALE is twice the amount to multiply the cost by (it is twice so
1078 we can represent half-cost adjustments). */
1079 static void
1080 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1081 int context, enum rtx_code outer_code,
1082 enum rtx_code index_code, int scale)
1084 enum rtx_code code = GET_CODE (x);
1085 enum reg_class rclass;
1087 if (context == 1)
1088 rclass = INDEX_REG_CLASS;
1089 else
1090 rclass = base_reg_class (mode, as, outer_code, index_code);
1092 switch (code)
1094 case CONST_INT:
1095 case CONST:
1096 case CC0:
1097 case PC:
1098 case SYMBOL_REF:
1099 case LABEL_REF:
1100 return;
1102 case PLUS:
1103 /* When we have an address that is a sum, we must determine
1104 whether registers are "base" or "index" regs. If there is a
1105 sum of two registers, we must choose one to be the "base".
1106 Luckily, we can use the REG_POINTER to make a good choice
1107 most of the time. We only need to do this on machines that
1108 can have two registers in an address and where the base and
1109 index register classes are different.
1111 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1112 but that seems bogus since it should only be set when we are
1113 sure the register is being used as a pointer. */
1115 rtx arg0 = XEXP (x, 0);
1116 rtx arg1 = XEXP (x, 1);
1117 enum rtx_code code0 = GET_CODE (arg0);
1118 enum rtx_code code1 = GET_CODE (arg1);
1120 /* Look inside subregs. */
1121 if (code0 == SUBREG)
1122 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1123 if (code1 == SUBREG)
1124 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1126 /* If this machine only allows one register per address, it
1127 must be in the first operand. */
1128 if (MAX_REGS_PER_ADDRESS == 1)
1129 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1131 /* If index and base registers are the same on this machine,
1132 just record registers in any non-constant operands. We
1133 assume here, as well as in the tests below, that all
1134 addresses are in canonical form. */
1135 else if (INDEX_REG_CLASS
1136 == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1138 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1139 if (! CONSTANT_P (arg1))
1140 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1143 /* If the second operand is a constant integer, it doesn't
1144 change what class the first operand must be. */
1145 else if (CONST_SCALAR_INT_P (arg1))
1146 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1147 /* If the second operand is a symbolic constant, the first
1148 operand must be an index register. */
1149 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1150 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1151 /* If both operands are registers but one is already a hard
1152 register of index or base register class, give the other the
1153 class that the hard register is not. */
1154 else if (code0 == REG && code1 == REG
1155 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1156 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1157 || ok_for_index_p_nonstrict (arg0)))
1158 record_address_regs (mode, as, arg1,
1159 ok_for_base_p_nonstrict (arg0, mode, as,
1160 PLUS, REG) ? 1 : 0,
1161 PLUS, REG, scale);
1162 else if (code0 == REG && code1 == REG
1163 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1164 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1165 || ok_for_index_p_nonstrict (arg1)))
1166 record_address_regs (mode, as, arg0,
1167 ok_for_base_p_nonstrict (arg1, mode, as,
1168 PLUS, REG) ? 1 : 0,
1169 PLUS, REG, scale);
1170 /* If one operand is known to be a pointer, it must be the
1171 base with the other operand the index. Likewise if the
1172 other operand is a MULT. */
1173 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1175 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1176 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1178 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1180 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1181 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1183 /* Otherwise, count equal chances that each might be a base or
1184 index register. This case should be rare. */
1185 else
1187 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1188 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1189 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1190 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1193 break;
1195 /* Double the importance of an allocno that is incremented or
1196 decremented, since it would take two extra insns if it ends
1197 up in the wrong place. */
1198 case POST_MODIFY:
1199 case PRE_MODIFY:
1200 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1201 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1202 if (REG_P (XEXP (XEXP (x, 1), 1)))
1203 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1204 2 * scale);
1205 break;
1207 case POST_INC:
1208 case PRE_INC:
1209 case POST_DEC:
1210 case PRE_DEC:
1211 /* Double the importance of an allocno that is incremented or
1212 decremented, since it would take two extra insns if it ends
1213 up in the wrong place. */
1214 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1215 break;
1217 case REG:
1219 struct costs *pp;
1220 int *pp_costs;
1221 enum reg_class i;
1222 int k, regno, add_cost;
1223 cost_classes_t cost_classes_ptr;
1224 enum reg_class *cost_classes;
1225 move_table *move_in_cost;
1227 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1228 break;
1230 regno = REGNO (x);
1231 if (allocno_p)
1232 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1233 pp = COSTS (costs, COST_INDEX (regno));
1234 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1235 if (INT_MAX - add_cost < pp->mem_cost)
1236 pp->mem_cost = INT_MAX;
1237 else
1238 pp->mem_cost += add_cost;
1239 cost_classes_ptr = regno_cost_classes[regno];
1240 cost_classes = cost_classes_ptr->classes;
1241 pp_costs = pp->cost;
1242 ira_init_register_move_cost_if_necessary (Pmode);
1243 move_in_cost = ira_may_move_in_cost[Pmode];
1244 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1246 i = cost_classes[k];
1247 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1248 if (INT_MAX - add_cost < pp_costs[k])
1249 pp_costs[k] = INT_MAX;
1250 else
1251 pp_costs[k] += add_cost;
1254 break;
1256 default:
1258 const char *fmt = GET_RTX_FORMAT (code);
1259 int i;
1260 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1261 if (fmt[i] == 'e')
1262 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1263 scale);
1270 /* Calculate the costs of insn operands. */
1271 static void
1272 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1274 const char *constraints[MAX_RECOG_OPERANDS];
1275 machine_mode modes[MAX_RECOG_OPERANDS];
1276 rtx ops[MAX_RECOG_OPERANDS];
1277 rtx set;
1278 int i;
1280 for (i = 0; i < recog_data.n_operands; i++)
1282 constraints[i] = recog_data.constraints[i];
1283 modes[i] = recog_data.operand_mode[i];
1286 /* If we get here, we are set up to record the costs of all the
1287 operands for this insn. Start by initializing the costs. Then
1288 handle any address registers. Finally record the desired classes
1289 for any allocnos, doing it twice if some pair of operands are
1290 commutative. */
1291 for (i = 0; i < recog_data.n_operands; i++)
1293 memcpy (op_costs[i], init_cost, struct_costs_size);
1295 ops[i] = recog_data.operand[i];
1296 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1297 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1299 if (MEM_P (recog_data.operand[i]))
1300 record_address_regs (GET_MODE (recog_data.operand[i]),
1301 MEM_ADDR_SPACE (recog_data.operand[i]),
1302 XEXP (recog_data.operand[i], 0),
1303 0, MEM, SCRATCH, frequency * 2);
1304 else if (constraints[i][0] == 'p'
1305 || (insn_extra_address_constraint
1306 (lookup_constraint (constraints[i]))))
1307 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1308 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1309 frequency * 2);
1312 /* Check for commutative in a separate loop so everything will have
1313 been initialized. We must do this even if one operand is a
1314 constant--see addsi3 in m68k.md. */
1315 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1316 if (constraints[i][0] == '%')
1318 const char *xconstraints[MAX_RECOG_OPERANDS];
1319 int j;
1321 /* Handle commutative operands by swapping the constraints.
1322 We assume the modes are the same. */
1323 for (j = 0; j < recog_data.n_operands; j++)
1324 xconstraints[j] = constraints[j];
1326 xconstraints[i] = constraints[i+1];
1327 xconstraints[i+1] = constraints[i];
1328 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1329 recog_data.operand, modes,
1330 xconstraints, insn, pref);
1332 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1333 recog_data.operand, modes,
1334 constraints, insn, pref);
1336 /* If this insn is a single set copying operand 1 to operand 0 and
1337 one operand is an allocno with the other a hard reg or an allocno
1338 that prefers a hard register that is in its own register class
1339 then we may want to adjust the cost of that register class to a negative value.
1341 Avoid the adjustment if the source does not die to avoid
1342 stressing of register allocator by preferencing two colliding
1343 registers into single class.
1345 Also avoid the adjustment if a copy between hard registers of the
1346 class is expensive (ten times the cost of a default copy is
1347 considered arbitrarily expensive). This avoids losing when the
1348 preferred class is very expensive as the source of a copy
1349 instruction. */
1350 if ((set = single_set (insn)) != NULL_RTX
1351 /* In rare cases the single set insn might have fewer than 2 operands,
1352 as the source can be a fixed special reg. */
1353 && recog_data.n_operands > 1
1354 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1356 int regno, other_regno;
1357 rtx dest = SET_DEST (set);
1358 rtx src = SET_SRC (set);
1360 if (GET_CODE (dest) == SUBREG
1361 && (GET_MODE_SIZE (GET_MODE (dest))
1362 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1363 dest = SUBREG_REG (dest);
1364 if (GET_CODE (src) == SUBREG
1365 && (GET_MODE_SIZE (GET_MODE (src))
1366 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1367 src = SUBREG_REG (src);
1368 if (REG_P (src) && REG_P (dest)
1369 && find_regno_note (insn, REG_DEAD, REGNO (src))
1370 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1371 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1372 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1373 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1375 machine_mode mode = GET_MODE (src);
1376 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1377 enum reg_class *cost_classes = cost_classes_ptr->classes;
1378 reg_class_t rclass;
1379 int k, nr;
1381 i = regno == (int) REGNO (src) ? 1 : 0;
1382 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1384 rclass = cost_classes[k];
1385 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1386 && (reg_class_size[(int) rclass]
1387 == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1389 if (reg_class_size[rclass] == 1)
1390 op_costs[i]->cost[k] = -frequency;
1391 else
1393 for (nr = 0;
1394 nr < hard_regno_nregs[other_regno][mode];
1395 nr++)
1396 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
1397 other_regno + nr))
1398 break;
1400 if (nr == hard_regno_nregs[other_regno][mode])
1401 op_costs[i]->cost[k] = -frequency;
1411 /* Process one insn INSN. Scan it and record each time it would save
1412 code to put certain allocnos in a certain class. Return the last
1413 insn processed, so that the scan can be continued from there. */
1414 static rtx_insn *
1415 scan_one_insn (rtx_insn *insn)
1417 enum rtx_code pat_code;
1418 rtx set, note;
1419 int i, k;
1420 bool counted_mem;
1422 if (!NONDEBUG_INSN_P (insn))
1423 return insn;
1425 pat_code = GET_CODE (PATTERN (insn));
1426 if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT)
1427 return insn;
1429 counted_mem = false;
1430 set = single_set (insn);
1431 extract_insn (insn);
1433 /* If this insn loads a parameter from its stack slot, then it
1434 represents a savings, rather than a cost, if the parameter is
1435 stored in memory. Record this fact.
1437 Similarly if we're loading other constants from memory (constant
1438 pool, TOC references, small data areas, etc) and this is the only
1439 assignment to the destination pseudo.
1441 Don't do this if SET_SRC (set) isn't a general operand: if it is
1442 a memory requiring special instructions to load it, decreasing
1443 mem_cost might result in it being loaded using the specialized
1444 instruction into a register, then stored into the stack and loaded
1445 again from the stack. See PR52208.
1447 Don't do this if SET_SRC (set) has side effect. See PR56124. */
1448 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1449 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1450 && ((MEM_P (XEXP (note, 0))
1451 && !side_effects_p (SET_SRC (set)))
1452 || (CONSTANT_P (XEXP (note, 0))
1453 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1454 XEXP (note, 0))
1455 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1456 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set))))
1458 enum reg_class cl = GENERAL_REGS;
1459 rtx reg = SET_DEST (set);
1460 int num = COST_INDEX (REGNO (reg));
1462 COSTS (costs, num)->mem_cost
1463 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1464 record_address_regs (GET_MODE (SET_SRC (set)),
1465 MEM_ADDR_SPACE (SET_SRC (set)),
1466 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1467 frequency * 2);
1468 counted_mem = true;
1471 record_operand_costs (insn, pref);
1473 /* Now add the cost for each operand to the total costs for its
1474 allocno. */
1475 for (i = 0; i < recog_data.n_operands; i++)
1476 if (REG_P (recog_data.operand[i])
1477 && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1479 int regno = REGNO (recog_data.operand[i]);
1480 struct costs *p = COSTS (costs, COST_INDEX (regno));
1481 struct costs *q = op_costs[i];
1482 int *p_costs = p->cost, *q_costs = q->cost;
1483 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1484 int add_cost;
1486 /* If we already accounted for the memory "cost" above, don't
1487 do so again. */
1488 if (!counted_mem)
1490 add_cost = q->mem_cost;
1491 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1492 p->mem_cost = INT_MAX;
1493 else
1494 p->mem_cost += add_cost;
1496 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1498 add_cost = q_costs[k];
1499 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1500 p_costs[k] = INT_MAX;
1501 else
1502 p_costs[k] += add_cost;
1506 return insn;
1511 /* Print allocnos costs to file F. */
1512 static void
1513 print_allocno_costs (FILE *f)
1515 int k;
1516 ira_allocno_t a;
1517 ira_allocno_iterator ai;
1519 ira_assert (allocno_p);
1520 fprintf (f, "\n");
1521 FOR_EACH_ALLOCNO (a, ai)
1523 int i, rclass;
1524 basic_block bb;
1525 int regno = ALLOCNO_REGNO (a);
1526 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1527 enum reg_class *cost_classes = cost_classes_ptr->classes;
1529 i = ALLOCNO_NUM (a);
1530 fprintf (f, " a%d(r%d,", i, regno);
1531 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1532 fprintf (f, "b%d", bb->index);
1533 else
1534 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1535 fprintf (f, ") costs:");
1536 for (k = 0; k < cost_classes_ptr->num; k++)
1538 rclass = cost_classes[k];
1539 fprintf (f, " %s:%d", reg_class_names[rclass],
1540 COSTS (costs, i)->cost[k]);
1541 if (flag_ira_region == IRA_REGION_ALL
1542 || flag_ira_region == IRA_REGION_MIXED)
1543 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1545 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1546 if (flag_ira_region == IRA_REGION_ALL
1547 || flag_ira_region == IRA_REGION_MIXED)
1548 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1549 fprintf (f, "\n");
1553 /* Print pseudo costs to file F. */
1554 static void
1555 print_pseudo_costs (FILE *f)
1557 int regno, k;
1558 int rclass;
1559 cost_classes_t cost_classes_ptr;
1560 enum reg_class *cost_classes;
1562 ira_assert (! allocno_p);
1563 fprintf (f, "\n");
1564 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1566 if (REG_N_REFS (regno) <= 0)
1567 continue;
1568 cost_classes_ptr = regno_cost_classes[regno];
1569 cost_classes = cost_classes_ptr->classes;
1570 fprintf (f, " r%d costs:", regno);
1571 for (k = 0; k < cost_classes_ptr->num; k++)
1573 rclass = cost_classes[k];
1574 fprintf (f, " %s:%d", reg_class_names[rclass],
1575 COSTS (costs, regno)->cost[k]);
1577 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1581 /* Traverse the basic block BB to update the allocno
1582 costs. */
1583 static void
1584 process_bb_for_costs (basic_block bb)
1586 rtx_insn *insn;
1588 frequency = REG_FREQ_FROM_BB (bb);
1589 if (frequency == 0)
1590 frequency = 1;
1591 FOR_BB_INSNS (bb, insn)
1592 insn = scan_one_insn (insn);
1595 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1596 costs. */
1597 static void
1598 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1600 basic_block bb;
1602 bb = loop_tree_node->bb;
1603 if (bb != NULL)
1604 process_bb_for_costs (bb);
1607 /* Find costs of register classes and memory for allocnos or pseudos
1608 and their best costs. Set up preferred, alternative and allocno
1609 classes for pseudos. */
1610 static void
1611 find_costs_and_classes (FILE *dump_file)
1613 int i, k, start, max_cost_classes_num;
1614 int pass;
1615 basic_block bb;
1616 enum reg_class *regno_best_class, new_class;
1618 init_recog ();
1619 regno_best_class
1620 = (enum reg_class *) ira_allocate (max_reg_num ()
1621 * sizeof (enum reg_class));
1622 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1623 regno_best_class[i] = NO_REGS;
1624 if (!resize_reg_info () && allocno_p
1625 && pseudo_classes_defined_p && flag_expensive_optimizations)
1627 ira_allocno_t a;
1628 ira_allocno_iterator ai;
1630 pref = pref_buffer;
1631 max_cost_classes_num = 1;
1632 FOR_EACH_ALLOCNO (a, ai)
1634 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1635 setup_regno_cost_classes_by_aclass
1636 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1637 max_cost_classes_num
1638 = MAX (max_cost_classes_num,
1639 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1641 start = 1;
1643 else
1645 pref = NULL;
1646 max_cost_classes_num = ira_important_classes_num;
1647 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1648 if (regno_reg_rtx[i] != NULL_RTX)
1649 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1650 else
1651 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1652 start = 0;
1654 if (allocno_p)
1655 /* Clear the flag for the next compiled function. */
1656 pseudo_classes_defined_p = false;
1657 /* Normally we scan the insns once and determine the best class to
1658 use for each allocno. However, if -fexpensive-optimizations is
1659 on, we do so twice, the second time using the tentative best
1660 classes to guide the selection. */
1661 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1663 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1664 fprintf (dump_file,
1665 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1667 if (pass != start)
1669 max_cost_classes_num = 1;
1670 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1672 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1673 max_cost_classes_num
1674 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1678 struct_costs_size
1679 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
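/* The "- 1" accounts for the one cost[] element already declared inside
   struct costs.  */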
1680 /* Zero out our accumulation of the cost of each class for each
1681 allocno. */
1682 memset (costs, 0, cost_elements_num * struct_costs_size);
1684 if (allocno_p)
1686 /* Scan the instructions and record each time it would save code
1687 to put a certain allocno in a certain class. */
1688 ira_traverse_loop_tree (true, ira_loop_tree_root,
1689 process_bb_node_for_costs, NULL);
1691 memcpy (total_allocno_costs, costs,
1692 max_struct_costs_size * ira_allocnos_num);
1694 else
1696 basic_block bb;
1698 FOR_EACH_BB_FN (bb, cfun)
1699 process_bb_for_costs (bb);
1702 if (pass == 0)
1703 pref = pref_buffer;
1705 /* Now for each allocno look at how desirable each class is and
1706 find which class is preferred. */
1707 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1709 ira_allocno_t a, parent_a;
1710 int rclass, a_num, parent_a_num, add_cost;
1711 ira_loop_tree_node_t parent;
1712 int best_cost, allocno_cost;
1713 enum reg_class best, alt_class;
1714 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1715 enum reg_class *cost_classes = cost_classes_ptr->classes;
1716 int *i_costs = temp_costs->cost;
1717 int i_mem_cost;
1718 int equiv_savings = regno_equiv_gains[i];
1720 if (! allocno_p)
1722 if (regno_reg_rtx[i] == NULL_RTX)
1723 continue;
1724 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1725 i_mem_cost = temp_costs->mem_cost;
1727 else
1729 if (ira_regno_allocno_map[i] == NULL)
1730 continue;
1731 memset (temp_costs, 0, struct_costs_size);
1732 i_mem_cost = 0;
1733 /* Find cost of all allocnos with the same regno. */
1734 for (a = ira_regno_allocno_map[i];
1735 a != NULL;
1736 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1738 int *a_costs, *p_costs;
1740 a_num = ALLOCNO_NUM (a);
1741 if ((flag_ira_region == IRA_REGION_ALL
1742 || flag_ira_region == IRA_REGION_MIXED)
1743 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1744 && (parent_a = parent->regno_allocno_map[i]) != NULL
1745 /* There are no caps yet. */
1746 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1747 (a)->border_allocnos,
1748 ALLOCNO_NUM (a)))
1750 /* Propagate costs to upper levels in the region
1751 tree. */
1752 parent_a_num = ALLOCNO_NUM (parent_a);
1753 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1754 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1755 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1757 add_cost = a_costs[k];
1758 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1759 p_costs[k] = INT_MAX;
1760 else
1761 p_costs[k] += add_cost;
1763 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1764 if (add_cost > 0
1765 && (INT_MAX - add_cost
1766 < COSTS (total_allocno_costs,
1767 parent_a_num)->mem_cost))
1768 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1769 = INT_MAX;
1770 else
1771 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1772 += add_cost;
1774 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1775 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1777 a_costs = COSTS (costs, a_num)->cost;
1778 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1780 add_cost = a_costs[k];
1781 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1782 i_costs[k] = INT_MAX;
1783 else
1784 i_costs[k] += add_cost;
1786 add_cost = COSTS (costs, a_num)->mem_cost;
1787 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1788 i_mem_cost = INT_MAX;
1789 else
1790 i_mem_cost += add_cost;
1793 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1794 i_mem_cost = 0;
1795 else if (equiv_savings < 0)
1796 i_mem_cost = -equiv_savings;
1797 else if (equiv_savings > 0)
1799 i_mem_cost = 0;
1800 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1801 i_costs[k] += equiv_savings;
1804 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
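/* Start from a large sentinel cost (about INT_MAX / 2) so that any real
   class cost compares as better.  */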
1805 best = ALL_REGS;
1806 alt_class = NO_REGS;
1807 /* Find best common class for all allocnos with the same
1808 regno. */
1809 for (k = 0; k < cost_classes_ptr->num; k++)
1811 rclass = cost_classes[k];
1812 if (i_costs[k] < best_cost)
1814 best_cost = i_costs[k];
1815 best = (enum reg_class) rclass;
1817 else if (i_costs[k] == best_cost)
1818 best = ira_reg_class_subunion[best][rclass];
1819 if (pass == flag_expensive_optimizations
1820 /* We still prefer registers to memory even at this
1821 stage if their costs are the same. We will make
1822 a final decision during assigning hard registers
1823 when we have all info including more accurate
1824 costs which might be affected by assigning hard
1825 registers to other pseudos because the pseudos
1826 involved in moves can be coalesced. */
1827 && i_costs[k] <= i_mem_cost
1828 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1829 > reg_class_size[alt_class]))
1830 alt_class = reg_class_subunion[alt_class][rclass];
1832 alt_class = ira_allocno_class_translate[alt_class];
1833 if (best_cost > i_mem_cost
1834 && ! non_spilled_static_chain_regno_p (i))
1835 regno_aclass[i] = NO_REGS;
1836 else if (!optimize && !targetm.class_likely_spilled_p (best))
1837 /* Registers in the alternative class are likely to need
1838 longer or slower sequences than registers in the best class.
1839 When optimizing we make some effort to use the best class
1840 over the alternative class where possible, but at -O0 we
1841 effectively give the alternative class equal weight.
1842 We then run the risk of using slower alternative registers
1843 when plenty of registers from the best class are still free.
1844 This is especially true because live ranges tend to be very
1845 short in -O0 code and so register pressure tends to be low.
1847 Avoid that by ignoring the alternative class if the best
1848 class has plenty of registers. */
1849 regno_aclass[i] = best;
1850 else
1852 /* Make the common class the biggest class of best and
1853 alt_class. */
1854 regno_aclass[i]
1855 = ira_reg_class_superunion[best][alt_class];
1856 ira_assert (regno_aclass[i] != NO_REGS
1857 && ira_reg_allocno_class_p[regno_aclass[i]]);
1859 if ((new_class
1860 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1861 (i, regno_aclass[i], best))) != regno_aclass[i])
1863 regno_aclass[i] = new_class;
1864 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1865 reg_class_contents[best]))
1866 best = new_class;
1867 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1868 reg_class_contents[alt_class]))
1869 alt_class = new_class;
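/* On the pass where the final choice is made, fall back to memory
   (NO_REGS) if it is cheaper than the best register class, and drop
   the alternative class when it is the same as the best class.  */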
1871 if (pass == flag_expensive_optimizations)
1873 if (best_cost > i_mem_cost
1874 /* Do not assign NO_REGS to static chain pointer
1875 pseudo when non-local goto is used. */
1876 && ! non_spilled_static_chain_regno_p (i))
1877 best = alt_class = NO_REGS;
1878 else if (best == alt_class)
1879 alt_class = NO_REGS;
1880 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1881 if ((!allocno_p || internal_flag_ira_verbose > 2)
1882 && dump_file != NULL)
1883 fprintf (dump_file,
1884 " r%d: preferred %s, alternative %s, allocno %s\n",
1885 i, reg_class_names[best], reg_class_names[alt_class],
1886 reg_class_names[regno_aclass[i]]);
1888 regno_best_class[i] = best;
1889 if (! allocno_p)
1891 pref[i] = (best_cost > i_mem_cost
1892 && ! non_spilled_static_chain_regno_p (i)
1893 ? NO_REGS : best);
1894 continue;
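/* For the allocno case, refine the choice per allocno: among the cost
   classes that are subsets of the common allocno class, pick the one
   with the smallest accumulated cost.  */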
1896 for (a = ira_regno_allocno_map[i];
1897 a != NULL;
1898 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1900 enum reg_class aclass = regno_aclass[i];
1901 int a_num = ALLOCNO_NUM (a);
1902 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1903 int *a_costs = COSTS (costs, a_num)->cost;
1905 if (aclass == NO_REGS)
1906 best = NO_REGS;
1907 else
1909 /* Find the best class that is a subset of the common
1910 class. */
1911 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1912 allocno_cost = best_cost;
1913 best = ALL_REGS;
1914 for (k = 0; k < cost_classes_ptr->num; k++)
1916 rclass = cost_classes[k];
1917 if (! ira_class_subset_p[rclass][aclass])
1918 continue;
1919 if (total_a_costs[k] < best_cost)
1921 best_cost = total_a_costs[k];
1922 allocno_cost = a_costs[k];
1923 best = (enum reg_class) rclass;
1925 else if (total_a_costs[k] == best_cost)
1927 best = ira_reg_class_subunion[best][rclass];
1928 allocno_cost = MAX (allocno_cost, a_costs[k]);
1931 ALLOCNO_CLASS_COST (a) = allocno_cost;
1933 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1934 && (pass == 0 || pref[a_num] != best))
1936 fprintf (dump_file, " a%d (r%d,", a_num, i);
1937 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1938 fprintf (dump_file, "b%d", bb->index);
1939 else
1940 fprintf (dump_file, "l%d",
1941 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1942 fprintf (dump_file, ") best %s, allocno %s\n",
1943 reg_class_names[best],
1944 reg_class_names[aclass]);
1946 pref[a_num] = best;
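/* If the chosen class is so small that a single value of this mode
   needs all of its hard registers, the allocno is effectively pinned
   to one specific register sequence; record that as an explicit
   preference, weighted by the saving relative to the common class
   expressed as a number of register moves.  */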
1947 if (pass == flag_expensive_optimizations && best != aclass
1948 && ira_class_hard_regs_num[best] > 0
1949 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1950 >= ira_class_hard_regs_num[best]))
1952 int ind = cost_classes_ptr->index[aclass];
1954 ira_assert (ind >= 0);
1955 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1956 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1957 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
1958 / (ira_register_move_cost
1959 [ALLOCNO_MODE (a)][best][aclass]));
1960 for (k = 0; k < cost_classes_ptr->num; k++)
1961 if (ira_class_subset_p[cost_classes[k]][best])
1962 a_costs[k] = a_costs[ind];
1967 if (internal_flag_ira_verbose > 4 && dump_file)
1969 if (allocno_p)
1970 print_allocno_costs (dump_file);
1971 else
1972 print_pseudo_costs (dump_file);
1973 fprintf (dump_file, "\n");
1976 ira_free (regno_best_class);
1981 /* Process moves involving hard regs to modify allocno hard register
1982 costs. We can do this only after determining allocno class. If a
1983 hard register forms a register class, then moves with the hard
1984 register are already taken into account in class costs for the
1985 allocno. */
1986 static void
1987 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
1989 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
1990 bool to_p;
1991 ira_allocno_t a, curr_a;
1992 ira_loop_tree_node_t curr_loop_tree_node;
1993 enum reg_class rclass;
1994 basic_block bb;
1995 rtx_insn *insn;
1996 rtx set, src, dst;
1998 bb = loop_tree_node->bb;
1999 if (bb == NULL)
2000 return;
2001 freq = REG_FREQ_FROM_BB (bb);
2002 if (freq == 0)
2003 freq = 1;
2004 FOR_BB_INSNS (bb, insn)
2006 if (!NONDEBUG_INSN_P (insn))
2007 continue;
2008 set = single_set (insn);
2009 if (set == NULL_RTX)
2010 continue;
2011 dst = SET_DEST (set);
2012 src = SET_SRC (set);
2013 if (! REG_P (dst) || ! REG_P (src))
2014 continue;
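/* Only moves between a pseudo and a hard register matter here; work
   out which operand is the hard register.  */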
2015 dst_regno = REGNO (dst);
2016 src_regno = REGNO (src);
2017 if (dst_regno >= FIRST_PSEUDO_REGISTER
2018 && src_regno < FIRST_PSEUDO_REGISTER)
2020 hard_regno = src_regno;
2021 a = ira_curr_regno_allocno_map[dst_regno];
2022 to_p = true;
2024 else if (src_regno >= FIRST_PSEUDO_REGISTER
2025 && dst_regno < FIRST_PSEUDO_REGISTER)
2027 hard_regno = dst_regno;
2028 a = ira_curr_regno_allocno_map[src_regno];
2029 to_p = false;
2031 else
2032 continue;
2033 rclass = ALLOCNO_CLASS (a);
2034 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2035 continue;
2036 i = ira_class_hard_reg_index[rclass][hard_regno];
2037 if (i < 0)
2038 continue;
2039 a_regno = ALLOCNO_REGNO (a);
2040 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2041 curr_loop_tree_node != NULL;
2042 curr_loop_tree_node = curr_loop_tree_node->parent)
2043 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2044 ira_add_allocno_pref (curr_a, hard_regno, freq);
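/* Make that hard register look cheaper for the allocno by the cost of
   the register-to-register move the assignment would save, and let
   ALLOCNO_CLASS_COST drop with it.  */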
2046 int cost;
2047 enum reg_class hard_reg_class;
2048 machine_mode mode;
2050 mode = ALLOCNO_MODE (a);
2051 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2052 ira_init_register_move_cost_if_necessary (mode);
2053 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2054 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2055 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2056 ALLOCNO_CLASS_COST (a));
2057 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2058 rclass, 0);
2059 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2060 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2061 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2062 ALLOCNO_HARD_REG_COSTS (a)[i]);
2067 /* After hard register and memory costs have been found for the allocnos,
2068 set up each allocno's class and adjust its hard register costs to
2069 account for insns that move the allocno to/from hard registers. */
2070 static void
2071 setup_allocno_class_and_costs (void)
2073 int i, j, n, regno, hard_regno, num;
2074 int *reg_costs;
2075 enum reg_class aclass, rclass;
2076 ira_allocno_t a;
2077 ira_allocno_iterator ai;
2078 cost_classes_t cost_classes_ptr;
2080 ira_assert (allocno_p);
2081 FOR_EACH_ALLOCNO (a, ai)
2083 i = ALLOCNO_NUM (a);
2084 regno = ALLOCNO_REGNO (a);
2085 aclass = regno_aclass[regno];
2086 cost_classes_ptr = regno_cost_classes[regno];
2087 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2088 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2089 ira_set_allocno_class (a, aclass);
2090 if (aclass == NO_REGS)
2091 continue;
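/* When optimizing and the preferred class differs from the allocno
   class, spell the costs out per hard register: registers in the
   preferred class get the class cost, the others get the cost recorded
   for their own register class.  */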
2092 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2094 n = ira_class_hard_regs_num[aclass];
2095 ALLOCNO_HARD_REG_COSTS (a)
2096 = reg_costs = ira_allocate_cost_vector (aclass);
2097 for (j = n - 1; j >= 0; j--)
2099 hard_regno = ira_class_hard_regs[aclass][j];
2100 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2101 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2102 else
2104 rclass = REGNO_REG_CLASS (hard_regno);
2105 num = cost_classes_ptr->index[rclass];
2106 if (num < 0)
2108 num = cost_classes_ptr->hard_regno_index[hard_regno];
2109 ira_assert (num >= 0);
2111 reg_costs[j] = COSTS (costs, i)->cost[num];
2116 if (optimize)
2117 ira_traverse_loop_tree (true, ira_loop_tree_root,
2118 process_bb_node_for_hard_reg_moves, NULL);
2123 /* Called once during the compiler run; initializes the cost vector pointers. */
2124 void
2125 ira_init_costs_once (void)
2127 int i;
2129 init_cost = NULL;
2130 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2132 op_costs[i] = NULL;
2133 this_op_costs[i] = NULL;
2135 temp_costs = NULL;
2138 /* Free allocated temporary cost vectors. */
2139 void
2140 target_ira_int::free_ira_costs ()
2142 int i;
2144 free (x_init_cost);
2145 x_init_cost = NULL;
2146 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2148 free (x_op_costs[i]);
2149 free (x_this_op_costs[i]);
2150 x_op_costs[i] = x_this_op_costs[i] = NULL;
2152 free (x_temp_costs);
2153 x_temp_costs = NULL;
2156 /* This is called each time the register-related information is
2157 changed. */
2158 void
2159 ira_init_costs (void)
2161 int i;
2163 this_target_ira_int->free_ira_costs ();
2164 max_struct_costs_size
2165 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2166 /* Don't use ira_allocate because vectors live through several IRA
2167 calls. */
2168 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2169 init_cost->mem_cost = 1000000;
2170 for (i = 0; i < ira_important_classes_num; i++)
2171 init_cost->cost[i] = 1000000;
2172 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2174 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2175 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2177 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2182 /* Common initialization function for ira_costs and
2183 ira_set_pseudo_classes. */
2184 static void
2185 init_costs (void)
2187 init_subregs_of_mode ();
2188 costs = (struct costs *) ira_allocate (max_struct_costs_size
2189 * cost_elements_num);
2190 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2191 * cost_elements_num);
2192 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2193 * max_reg_num ());
2194 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2195 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2198 /* Common finalization function for ira_costs and
2199 ira_set_pseudo_classes. */
2200 static void
2201 finish_costs (void)
2203 finish_subregs_of_mode ();
2204 ira_free (regno_equiv_gains);
2205 ira_free (regno_aclass);
2206 ira_free (pref_buffer);
2207 ira_free (costs);
2210 /* Entry function which defines the register class, memory, and hard
2211 register costs for each allocno. */
2212 void
2213 ira_costs (void)
2215 allocno_p = true;
2216 cost_elements_num = ira_allocnos_num;
2217 init_costs ();
2218 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2219 * ira_allocnos_num);
2220 initiate_regno_cost_classes ();
2221 calculate_elim_costs_all_insns ();
2222 find_costs_and_classes (ira_dump_file);
2223 setup_allocno_class_and_costs ();
2224 finish_regno_cost_classes ();
2225 finish_costs ();
2226 ira_free (total_allocno_costs);
2229 /* Entry function which defines classes for pseudos.
2230 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2231 void
2232 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2234 allocno_p = false;
2235 internal_flag_ira_verbose = flag_ira_verbose;
2236 cost_elements_num = max_reg_num ();
2237 init_costs ();
2238 initiate_regno_cost_classes ();
2239 find_costs_and_classes (dump_file);
2240 finish_regno_cost_classes ();
2241 if (define_pseudo_classes)
2242 pseudo_classes_defined_p = true;
2244 finish_costs ();
2249 /* Change the hard register costs for allocnos that live across
2250 function calls. This is called only after all intersecting calls
2251 have been found while building the allocno live ranges. */
2252 void
2253 ira_tune_allocno_costs (void)
2255 int j, n, regno;
2256 int cost, min_cost, *reg_costs;
2257 enum reg_class aclass, rclass;
2258 machine_mode mode;
2259 ira_allocno_t a;
2260 ira_allocno_iterator ai;
2261 ira_allocno_object_iterator oi;
2262 ira_object_t obj;
2263 bool skip_p;
2264 HARD_REG_SET *crossed_calls_clobber_regs;
2266 FOR_EACH_ALLOCNO (a, ai)
2268 aclass = ALLOCNO_CLASS (a);
2269 if (aclass == NO_REGS)
2270 continue;
2271 mode = ALLOCNO_MODE (a);
2272 n = ira_class_hard_regs_num[aclass];
2273 min_cost = INT_MAX;
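/* If the allocno crosses calls that are not all cheap, penalize each
   hard register that such calls clobber by roughly the cost of saving
   and restoring it around them.  */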
2274 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2275 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2277 ira_allocate_and_set_costs
2278 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2279 ALLOCNO_CLASS_COST (a));
2280 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2281 for (j = n - 1; j >= 0; j--)
2283 regno = ira_class_hard_regs[aclass][j];
2284 skip_p = false;
2285 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2287 if (ira_hard_reg_set_intersection_p (regno, mode,
2288 OBJECT_CONFLICT_HARD_REGS
2289 (obj)))
2291 skip_p = true;
2292 break;
2295 if (skip_p)
2296 continue;
2297 rclass = REGNO_REG_CLASS (regno);
2298 cost = 0;
2299 crossed_calls_clobber_regs
2300 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2301 if (ira_hard_reg_set_intersection_p (regno, mode,
2302 *crossed_calls_clobber_regs)
2303 && (ira_hard_reg_set_intersection_p (regno, mode,
2304 call_used_reg_set)
2305 || HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
2306 cost += (ALLOCNO_CALL_FREQ (a)
2307 * (ira_memory_move_cost[mode][rclass][0]
2308 + ira_memory_move_cost[mode][rclass][1]));
2309 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2310 cost += ((ira_memory_move_cost[mode][rclass][0]
2311 + ira_memory_move_cost[mode][rclass][1])
2312 * ALLOCNO_FREQ (a)
2313 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2314 #endif
2315 if (INT_MAX - cost < reg_costs[j])
2316 reg_costs[j] = INT_MAX;
2317 else
2318 reg_costs[j] += cost;
2319 if (min_cost > reg_costs[j])
2320 min_cost = reg_costs[j];
2323 if (min_cost != INT_MAX)
2324 ALLOCNO_CLASS_COST (a) = min_cost;
2326 /* Some targets allow pseudos to be allocated to unaligned sequences
2327 of hard registers. However, selecting an unaligned sequence can
2328 unnecessarily restrict later allocations. So increase the cost of
2329 unaligned hard regs to encourage the use of aligned hard regs. */
2331 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2333 if (nregs > 1)
2335 ira_allocate_and_set_costs
2336 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2337 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2338 for (j = n - 1; j >= 0; j--)
2340 regno = ira_non_ordered_class_hard_regs[aclass][j];
2341 if ((regno % nregs) != 0)
2343 int index = ira_class_hard_reg_index[aclass][regno];
2344 ira_assert (index != -1);
2345 reg_costs[index] += ALLOCNO_FREQ (a);
2353 /* Add COST to the estimated gain for eliminating REGNO with its
2354 equivalence. If COST is zero, record that no such elimination is
2355 possible. */
2357 void
2358 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2360 if (cost == 0)
2361 regno_equiv_gains[regno] = 0;
2362 else
2363 regno_equiv_gains[regno] += cost;
2366 void
2367 ira_costs_c_finalize (void)
2369 this_target_ira_int->free_ira_costs ();