gcc/ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2016 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "tm_p.h"
30 #include "insn-config.h"
31 #include "regs.h"
32 #include "ira.h"
33 #include "ira-int.h"
34 #include "addresses.h"
35 #include "reload.h"
37 /* The flag is set up every time we calculate pseudo register
38    classes through the function ira_set_pseudo_classes.  */
39 static bool pseudo_classes_defined_p = false;
41 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
42 static bool allocno_p;
44 /* Number of elements in array `costs'. */
45 static int cost_elements_num;
47 /* The `costs' struct records the cost of using hard registers of each
48 class considered for the calculation and of using memory for each
49 allocno or pseudo. */
50 struct costs
52 int mem_cost;
53 /* Costs for register classes start here. We process only some
54 allocno classes. */
55 int cost[1];
58 #define max_struct_costs_size \
59 (this_target_ira_int->x_max_struct_costs_size)
60 #define init_cost \
61 (this_target_ira_int->x_init_cost)
62 #define temp_costs \
63 (this_target_ira_int->x_temp_costs)
64 #define op_costs \
65 (this_target_ira_int->x_op_costs)
66 #define this_op_costs \
67 (this_target_ira_int->x_this_op_costs)
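/* Note: the five macros above are shorthands for the corresponding x_...
   fields of the per-target IRA state this_target_ira_int (declared in
   ira-int.h).  */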
69 /* Costs of each class for each allocno or pseudo. */
70 static struct costs *costs;
72 /* Accumulated costs of each class for each allocno. */
73 static struct costs *total_allocno_costs;
75 /* It is the current size of struct costs. */
76 static int struct_costs_size;
78 /* Return pointer to structure containing costs of allocno or pseudo
79 with given NUM in array ARR. */
80 #define COSTS(arr, num) \
81 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
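/* struct costs ends in a nominally one-element cost[] array that is
   allocated with a variable length, so elements are STRUCT_COSTS_SIZE
   bytes apart rather than sizeof (struct costs); hence the byte-offset
   arithmetic above.  */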
83 /* Return index in COSTS when processing reg with REGNO. */
84 #define COST_INDEX(regno) (allocno_p \
85 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
86 : (int) regno)
88 /* Record register class preferences of each allocno or pseudo.  A null
89    value means no preferences yet; this happens on the first iteration
90    of the cost calculation.  */
91 static enum reg_class *pref;
93 /* Allocated buffers for pref. */
94 static enum reg_class *pref_buffer;
96 /* Record allocno class of each allocno with the same regno. */
97 static enum reg_class *regno_aclass;
99 /* Record cost gains for not allocating a register with an invariant
100 equivalence. */
101 static int *regno_equiv_gains;
103 /* Execution frequency of the current insn. */
104 static int frequency;
108 /* Info about reg classes whose costs are calculated for a pseudo. */
109 struct cost_classes
111 /* Number of the cost classes in the subsequent array. */
112 int num;
113 /* Container of the cost classes. */
114 enum reg_class classes[N_REG_CLASSES];
115 /* Map reg class -> index of the reg class in the previous array.
116 -1 if it is not a cost class. */
117 int index[N_REG_CLASSES];
118    /* Map hard regno -> index of the first class in array CLASSES that
119       contains the hard regno, or -1 if no cost class contains it.  */
120 int hard_regno_index[FIRST_PSEUDO_REGISTER];
123 /* Types of pointers to the structure above. */
124 typedef struct cost_classes *cost_classes_t;
125 typedef const struct cost_classes *const_cost_classes_t;
127 /* Info about cost classes for each pseudo. */
128 static cost_classes_t *regno_cost_classes;
130 /* Helper for cost_classes hashing. */
132 struct cost_classes_hasher : pointer_hash <cost_classes>
134 static inline hashval_t hash (const cost_classes *);
135 static inline bool equal (const cost_classes *, const cost_classes *);
136 static inline void remove (cost_classes *);
139 /* Returns hash value for cost classes info HV. */
140 inline hashval_t
141 cost_classes_hasher::hash (const cost_classes *hv)
143 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
146 /* Compares cost classes info HV1 and HV2. */
147 inline bool
148 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 return (hv1->num == hv2->num
151 && memcmp (hv1->classes, hv2->classes,
152 sizeof (enum reg_class) * hv1->num) == 0);
155 /* Delete cost classes info V from the hash table. */
156 inline void
157 cost_classes_hasher::remove (cost_classes *v)
159 ira_free (v);
162 /* Hash table of unique cost classes. */
163 static hash_table<cost_classes_hasher> *cost_classes_htab;
165 /* Map allocno class -> cost classes for pseudo of given allocno
166 class. */
167 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
169 /* Map mode -> cost classes for a pseudo of the given mode.  */
170 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
172 /* Cost classes that include all classes in ira_important_classes. */
173 static cost_classes all_cost_classes;
175 /* Use the array of classes in CLASSES_PTR to fill out the rest of
176 the structure. */
177 static void
178 complete_cost_classes (cost_classes_t classes_ptr)
180 for (int i = 0; i < N_REG_CLASSES; i++)
181 classes_ptr->index[i] = -1;
182 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
183 classes_ptr->hard_regno_index[i] = -1;
184 for (int i = 0; i < classes_ptr->num; i++)
186 enum reg_class cl = classes_ptr->classes[i];
187 classes_ptr->index[cl] = i;
188 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 unsigned int hard_regno = ira_class_hard_regs[cl][j];
191 if (classes_ptr->hard_regno_index[hard_regno] < 0)
192 classes_ptr->hard_regno_index[hard_regno] = i;
197 /* Initialize info about the cost classes for each pseudo. */
198 static void
199 initiate_regno_cost_classes (void)
201 int size = sizeof (cost_classes_t) * max_reg_num ();
203 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
204 memset (regno_cost_classes, 0, size);
205 memset (cost_classes_aclass_cache, 0,
206 sizeof (cost_classes_t) * N_REG_CLASSES);
207 memset (cost_classes_mode_cache, 0,
208 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
209 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
210 all_cost_classes.num = ira_important_classes_num;
211 for (int i = 0; i < ira_important_classes_num; i++)
212 all_cost_classes.classes[i] = ira_important_classes[i];
213 complete_cost_classes (&all_cost_classes);
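/* all_cost_classes now holds every class in ira_important_classes, with
   its index and hard_regno_index maps filled in by complete_cost_classes;
   restrict_cost_classes uses it as the starting point.  */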
216 /* Create new cost classes from cost classes FROM and set up members
217 index and hard_regno_index. Return the new classes. The function
218 implements some common code of two functions
219 setup_regno_cost_classes_by_aclass and
220 setup_regno_cost_classes_by_mode. */
221 static cost_classes_t
222 setup_cost_classes (cost_classes_t from)
224 cost_classes_t classes_ptr;
226 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
227 classes_ptr->num = from->num;
228 for (int i = 0; i < from->num; i++)
229 classes_ptr->classes[i] = from->classes[i];
230 complete_cost_classes (classes_ptr);
231 return classes_ptr;
234 /* Return a version of FULL that only considers registers in REGS that are
235 valid for mode MODE. Both FULL and the returned class are globally
236 allocated. */
237 static cost_classes_t
238 restrict_cost_classes (cost_classes_t full, machine_mode mode,
239 const HARD_REG_SET &regs)
241 static struct cost_classes narrow;
242 int map[N_REG_CLASSES];
243 narrow.num = 0;
244 for (int i = 0; i < full->num; i++)
246 /* Assume that we'll drop the class. */
247 map[i] = -1;
249 /* Ignore classes that are too small for the mode. */
250 enum reg_class cl = full->classes[i];
251 if (!contains_reg_of_mode[cl][mode])
252 continue;
254 /* Calculate the set of registers in CL that belong to REGS and
255 are valid for MODE. */
256 HARD_REG_SET valid_for_cl;
257 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
258 AND_HARD_REG_SET (valid_for_cl, regs);
259 AND_COMPL_HARD_REG_SET (valid_for_cl,
260 ira_prohibited_class_mode_regs[cl][mode]);
261 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
262 if (hard_reg_set_empty_p (valid_for_cl))
263 continue;
265 /* Don't use this class if the set of valid registers is a subset
266 of an existing class. For example, suppose we have two classes
267 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
268 that the mode changes allowed by FR_REGS are not as general as
269 the mode changes allowed by GR_REGS.
271 In this situation, the mode changes for GR_AND_FR_REGS could
272 either be seen as the union or the intersection of the mode
273 changes allowed by the two subclasses. The justification for
274 the union-based definition would be that, if you want a mode
275 change that's only allowed by GR_REGS, you can pick a register
276 from the GR_REGS subclass. The justification for the
277 intersection-based definition would be that every register
278 from the class would allow the mode change.
280 However, if we have a register that needs to be in GR_REGS,
281 using GR_AND_FR_REGS with the intersection-based definition
282 would be too pessimistic, since it would bring in restrictions
283 that only apply to FR_REGS. Conversely, if we have a register
284 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
285 union-based definition would lose the extra restrictions
286 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
287         for cases where GR_REGS and FR_REGS are both valid.  */
288 int pos;
289 for (pos = 0; pos < narrow.num; ++pos)
291 enum reg_class cl2 = narrow.classes[pos];
292 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
293 break;
295 map[i] = pos;
296 if (pos == narrow.num)
298 /* If several classes are equivalent, prefer to use the one
299 that was chosen as the allocno class. */
300 enum reg_class cl2 = ira_allocno_class_translate[cl];
301 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
302 cl = cl2;
303 narrow.classes[narrow.num++] = cl;
306 if (narrow.num == full->num)
307 return full;
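  /* The narrowed set really differs from FULL, so intern it in
     cost_classes_htab; equivalent restricted sets end up shared between
     pseudos.  */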
309 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
310 if (*slot == NULL)
312 cost_classes_t classes = setup_cost_classes (&narrow);
313 /* Map equivalent classes to the representative that we chose above. */
314 for (int i = 0; i < ira_important_classes_num; i++)
316 enum reg_class cl = ira_important_classes[i];
317 int index = full->index[cl];
318 if (index >= 0)
319 classes->index[cl] = map[index];
321 *slot = classes;
323 return *slot;
326 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
327    This function is used when we already know an initial approximation
328    of the allocno class of the pseudo, e.g. on the second iteration of
329    the class cost calculation or after the class cost calculation in
330    register-pressure sensitive insn scheduling or register-pressure
331    sensitive loop-invariant motion.  */
332 static void
333 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
335 static struct cost_classes classes;
336 cost_classes_t classes_ptr;
337 enum reg_class cl;
338 int i;
339 cost_classes **slot;
340 HARD_REG_SET temp, temp2;
341 bool exclude_p;
343 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
345 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
346 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
347       /* We exclude classes which are subsets of ACLASS from
348          consideration only if ACLASS is a uniform class.  */
349 exclude_p = ira_uniform_class_p[aclass];
350 classes.num = 0;
351 for (i = 0; i < ira_important_classes_num; i++)
353 cl = ira_important_classes[i];
354 if (exclude_p)
356 /* Exclude non-uniform classes which are subsets of
357 ACLASS. */
358 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
359 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
360 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
361 continue;
363 classes.classes[classes.num++] = cl;
365 slot = cost_classes_htab->find_slot (&classes, INSERT);
366 if (*slot == NULL)
368 classes_ptr = setup_cost_classes (&classes);
369 *slot = classes_ptr;
371 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
373 if (regno_reg_rtx[regno] != NULL_RTX)
375 /* Restrict the classes to those that are valid for REGNO's mode
376 (which might for example exclude singleton classes if the mode
377 requires two registers). Also restrict the classes to those that
378 are valid for subregs of REGNO. */
379 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
380 if (!valid_regs)
381 valid_regs = &reg_class_contents[ALL_REGS];
382 classes_ptr = restrict_cost_classes (classes_ptr,
383 PSEUDO_REGNO_MODE (regno),
384 *valid_regs);
386 regno_cost_classes[regno] = classes_ptr;
389 /* Setup cost classes for pseudo REGNO with MODE.  Using MODE can
390    decrease the number of cost classes for the pseudo if hard registers
391    of some important classes cannot hold a value of MODE.  The pseudo
392    then cannot get a hard register from those classes, so calculating
393    costs for those important classes would only waste CPU
394    time.  */
395 static void
396 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
398 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
399 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
400 mode, *valid_regs);
401 else
403 if (cost_classes_mode_cache[mode] == NULL)
404 cost_classes_mode_cache[mode]
405 = restrict_cost_classes (&all_cost_classes, mode,
406 reg_class_contents[ALL_REGS]);
407 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
411 /* Finalize info about the cost classes for each pseudo. */
412 static void
413 finish_regno_cost_classes (void)
415 ira_free (regno_cost_classes);
416 delete cost_classes_htab;
417 cost_classes_htab = NULL;
422 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
423 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
424 be a pseudo register. */
425 static int
426 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
427 secondary_reload_info *prev_sri)
429 secondary_reload_info sri;
430 reg_class_t secondary_class = NO_REGS;
432 /* If X is a SCRATCH, there is actually nothing to move since we are
433 assuming optimal allocation. */
434 if (GET_CODE (x) == SCRATCH)
435 return 0;
437 /* Get the class we will actually use for a reload. */
438 rclass = targetm.preferred_reload_class (x, rclass);
440 /* If we need a secondary reload for an intermediate, the cost is
441 that to load the input into the intermediate register, then to
442 copy it. */
443 sri.prev_sri = prev_sri;
444 sri.extra_cost = 0;
445 /* PR 68770: Secondary reload might examine the t_icode field. */
446 sri.t_icode = CODE_FOR_nothing;
448 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
450 if (secondary_class != NO_REGS)
452 ira_init_register_move_cost_if_necessary (mode);
453 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
454 + sri.extra_cost
455 + copy_cost (x, mode, secondary_class, to_p, &sri));
458 /* For memory, use the memory move cost, for (hard) registers, use
459 the cost to move between the register classes, and use 2 for
460 everything else (constants). */
461 if (MEM_P (x) || rclass == NO_REGS)
462 return sri.extra_cost
463 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
464 else if (REG_P (x))
466 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
468 ira_init_register_move_cost_if_necessary (mode);
469 return (sri.extra_cost
470 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
472 else
473 /* If this is a constant, we may eventually want to call rtx_cost
474 here. */
475 return sri.extra_cost + COSTS_N_INSNS (1);
480 /* Record the cost of using memory or hard registers of various
481 classes for the operands in INSN.
483 N_ALTS is the number of alternatives.
484 N_OPS is the number of operands.
485 OPS is an array of the operands.
486 MODES are the modes of the operands, in case any are VOIDmode.
487 CONSTRAINTS are the constraints to use for the operands. This array
488 is modified by this procedure.
490 This procedure works alternative by alternative. For each
491 alternative we assume that we will be able to allocate all allocnos
492 to their ideal register class and calculate the cost of using that
493 alternative. Then we compute, for each operand that is a
494 pseudo-register, the cost of having the allocno allocated to each
495 register class and using it in that alternative. To this cost is
496 added the cost of the alternative.
498 The cost of each class for this insn is its lowest cost among all
499 the alternatives. */
500 static void
501 record_reg_classes (int n_alts, int n_ops, rtx *ops,
502 machine_mode *modes, const char **constraints,
503 rtx_insn *insn, enum reg_class *pref)
505 int alt;
506 int i, j, k;
507 int insn_allows_mem[MAX_RECOG_OPERANDS];
508 move_table *move_in_cost, *move_out_cost;
509 short (*mem_cost)[2];
511 for (i = 0; i < n_ops; i++)
512 insn_allows_mem[i] = 0;
514   /* Process each alternative, each time minimizing an operand's running
515      cost against the cost of that operand in the alternative.  */
516 alternative_mask preferred = get_preferred_alternatives (insn);
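  /* Alternatives disabled for this insn are not costed, but their
     constraint strings still have to be stepped over; see the
     skip_alternative calls below.  */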
517 for (alt = 0; alt < n_alts; alt++)
519 enum reg_class classes[MAX_RECOG_OPERANDS];
520 int allows_mem[MAX_RECOG_OPERANDS];
521 enum reg_class rclass;
522 int alt_fail = 0;
523 int alt_cost = 0, op_cost_add;
525 if (!TEST_BIT (preferred, alt))
527 for (i = 0; i < recog_data.n_operands; i++)
528 constraints[i] = skip_alternative (constraints[i]);
530 continue;
533 for (i = 0; i < n_ops; i++)
535 unsigned char c;
536 const char *p = constraints[i];
537 rtx op = ops[i];
538 machine_mode mode = modes[i];
539 int allows_addr = 0;
540 int win = 0;
542 /* Initially show we know nothing about the register class. */
543 classes[i] = NO_REGS;
544 allows_mem[i] = 0;
546 /* If this operand has no constraints at all, we can
547 conclude nothing about it since anything is valid. */
548 if (*p == 0)
550 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
551 memset (this_op_costs[i], 0, struct_costs_size);
552 continue;
555 /* If this alternative is only relevant when this operand
556 matches a previous operand, we do different things
557      depending on whether this operand is an allocno-reg or not.
558 We must process any modifiers for the operand before we
559 can make this test. */
560 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
561 p++;
563 if (p[0] >= '0' && p[0] <= '0' + i)
565 /* Copy class and whether memory is allowed from the
566 matching alternative. Then perform any needed cost
567 computations and/or adjustments. */
568 j = p[0] - '0';
569 classes[i] = classes[j];
570 allows_mem[i] = allows_mem[j];
571 if (allows_mem[i])
572 insn_allows_mem[i] = 1;
574 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
576 /* If this matches the other operand, we have no
577 added cost and we win. */
578 if (rtx_equal_p (ops[j], op))
579 win = 1;
580 /* If we can put the other operand into a register,
581 add to the cost of this alternative the cost to
582 copy this operand to the register used for the
583 other operand. */
584 else if (classes[j] != NO_REGS)
586 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
587 win = 1;
590 else if (! REG_P (ops[j])
591 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
593 /* This op is an allocno but the one it matches is
594 not. */
596 /* If we can't put the other operand into a
597 register, this alternative can't be used. */
599 if (classes[j] == NO_REGS)
600 alt_fail = 1;
601 /* Otherwise, add to the cost of this alternative
602 the cost to copy the other operand to the hard
603 register used for this operand. */
604 else
605 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
607 else
609 /* The costs of this operand are not the same as the
610 other operand since move costs are not symmetric.
611 Moreover, if we cannot tie them, this alternative
612 needs to do a copy, which is one insn. */
613 struct costs *pp = this_op_costs[i];
614 int *pp_costs = pp->cost;
615 cost_classes_t cost_classes_ptr
616 = regno_cost_classes[REGNO (op)];
617 enum reg_class *cost_classes = cost_classes_ptr->classes;
618 bool in_p = recog_data.operand_type[i] != OP_OUT;
619 bool out_p = recog_data.operand_type[i] != OP_IN;
620 enum reg_class op_class = classes[i];
622 ira_init_register_move_cost_if_necessary (mode);
623 if (! in_p)
625 ira_assert (out_p);
626 if (op_class == NO_REGS)
628 mem_cost = ira_memory_move_cost[mode];
629 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
631 rclass = cost_classes[k];
632 pp_costs[k] = mem_cost[rclass][0] * frequency;
635 else
637 move_out_cost = ira_may_move_out_cost[mode];
638 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
640 rclass = cost_classes[k];
641 pp_costs[k]
642 = move_out_cost[op_class][rclass] * frequency;
646 else if (! out_p)
648 ira_assert (in_p);
649 if (op_class == NO_REGS)
651 mem_cost = ira_memory_move_cost[mode];
652 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
654 rclass = cost_classes[k];
655 pp_costs[k] = mem_cost[rclass][1] * frequency;
658 else
660 move_in_cost = ira_may_move_in_cost[mode];
661 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
663 rclass = cost_classes[k];
664 pp_costs[k]
665 = move_in_cost[rclass][op_class] * frequency;
669 else
671 if (op_class == NO_REGS)
673 mem_cost = ira_memory_move_cost[mode];
674 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
676 rclass = cost_classes[k];
677 pp_costs[k] = ((mem_cost[rclass][0]
678 + mem_cost[rclass][1])
679 * frequency);
682 else
684 move_in_cost = ira_may_move_in_cost[mode];
685 move_out_cost = ira_may_move_out_cost[mode];
686 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
688 rclass = cost_classes[k];
689 pp_costs[k] = ((move_in_cost[rclass][op_class]
690 + move_out_cost[op_class][rclass])
691 * frequency);
696 /* If the alternative actually allows memory, make
697 things a bit cheaper since we won't need an extra
698 insn to load it. */
699 pp->mem_cost
700 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
701 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
702 - allows_mem[i]) * frequency;
704 /* If we have assigned a class to this allocno in
705 our first pass, add a cost to this alternative
706 corresponding to what we would add if this
707 allocno were not in the appropriate class. */
708 if (pref)
710 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
712 if (pref_class == NO_REGS)
713 alt_cost
714 += ((out_p
715 ? ira_memory_move_cost[mode][op_class][0] : 0)
716 + (in_p
717 ? ira_memory_move_cost[mode][op_class][1]
718 : 0));
719 else if (ira_reg_class_intersect
720 [pref_class][op_class] == NO_REGS)
721 alt_cost
722 += ira_register_move_cost[mode][pref_class][op_class];
724 if (REGNO (ops[i]) != REGNO (ops[j])
725 && ! find_reg_note (insn, REG_DEAD, op))
726 alt_cost += 2;
728 p++;
732 /* Scan all the constraint letters. See if the operand
733 matches any of the constraints. Collect the valid
734 register classes and see if this operand accepts
735 memory. */
736 while ((c = *p))
738 switch (c)
740 case '*':
741 /* Ignore the next letter for this pass. */
742 c = *++p;
743 break;
745 case '^':
746 alt_cost += 2;
747 break;
749 case '?':
750 alt_cost += 2;
751 break;
753 case 'g':
754 if (MEM_P (op)
755 || (CONSTANT_P (op)
756 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
757 win = 1;
758 insn_allows_mem[i] = allows_mem[i] = 1;
759 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
760 break;
762 default:
763 enum constraint_num cn = lookup_constraint (p);
764 enum reg_class cl;
765 switch (get_constraint_type (cn))
767 case CT_REGISTER:
768 cl = reg_class_for_constraint (cn);
769 if (cl != NO_REGS)
770 classes[i] = ira_reg_class_subunion[classes[i]][cl];
771 break;
773 case CT_CONST_INT:
774 if (CONST_INT_P (op)
775 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
776 win = 1;
777 break;
779 case CT_MEMORY:
780 /* Every MEM can be reloaded to fit. */
781 insn_allows_mem[i] = allows_mem[i] = 1;
782 if (MEM_P (op))
783 win = 1;
784 break;
786 case CT_ADDRESS:
787 /* Every address can be reloaded to fit. */
788 allows_addr = 1;
789 if (address_operand (op, GET_MODE (op))
790 || constraint_satisfied_p (op, cn))
791 win = 1;
792 /* We know this operand is an address, so we
793 want it to be allocated to a hard register
794 that can be the base of an address,
795 i.e. BASE_REG_CLASS. */
796 classes[i]
797 = ira_reg_class_subunion[classes[i]]
798 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
799 ADDRESS, SCRATCH)];
800 break;
802 case CT_FIXED_FORM:
803 if (constraint_satisfied_p (op, cn))
804 win = 1;
805 break;
807 break;
809 p += CONSTRAINT_LEN (c, p);
810 if (c == ',')
811 break;
814 constraints[i] = p;
816 /* How we account for this operand now depends on whether it
817 is a pseudo register or not. If it is, we first check if
818 any register classes are valid. If not, we ignore this
819 alternative, since we want to assume that all allocnos get
820 allocated for register preferencing. If some register
821 class is valid, compute the costs of moving the allocno
822 into that class. */
823 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
825 if (classes[i] == NO_REGS && ! allows_mem[i])
827 /* We must always fail if the operand is a REG, but
828 we did not find a suitable class and memory is
829 not allowed.
831 Otherwise we may perform an uninitialized read
832 from this_op_costs after the `continue' statement
833 below. */
834 alt_fail = 1;
836 else
838 unsigned int regno = REGNO (op);
839 struct costs *pp = this_op_costs[i];
840 int *pp_costs = pp->cost;
841 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
842 enum reg_class *cost_classes = cost_classes_ptr->classes;
843 bool in_p = recog_data.operand_type[i] != OP_OUT;
844 bool out_p = recog_data.operand_type[i] != OP_IN;
845 enum reg_class op_class = classes[i];
847 ira_init_register_move_cost_if_necessary (mode);
848 if (! in_p)
850 ira_assert (out_p);
851 if (op_class == NO_REGS)
853 mem_cost = ira_memory_move_cost[mode];
854 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
856 rclass = cost_classes[k];
857 pp_costs[k] = mem_cost[rclass][0] * frequency;
860 else
862 move_out_cost = ira_may_move_out_cost[mode];
863 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
865 rclass = cost_classes[k];
866 pp_costs[k]
867 = move_out_cost[op_class][rclass] * frequency;
871 else if (! out_p)
873 ira_assert (in_p);
874 if (op_class == NO_REGS)
876 mem_cost = ira_memory_move_cost[mode];
877 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
879 rclass = cost_classes[k];
880 pp_costs[k] = mem_cost[rclass][1] * frequency;
883 else
885 move_in_cost = ira_may_move_in_cost[mode];
886 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
888 rclass = cost_classes[k];
889 pp_costs[k]
890 = move_in_cost[rclass][op_class] * frequency;
894 else
896 if (op_class == NO_REGS)
898 mem_cost = ira_memory_move_cost[mode];
899 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
901 rclass = cost_classes[k];
902 pp_costs[k] = ((mem_cost[rclass][0]
903 + mem_cost[rclass][1])
904 * frequency);
907 else
909 move_in_cost = ira_may_move_in_cost[mode];
910 move_out_cost = ira_may_move_out_cost[mode];
911 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
913 rclass = cost_classes[k];
914 pp_costs[k] = ((move_in_cost[rclass][op_class]
915 + move_out_cost[op_class][rclass])
916 * frequency);
921 if (op_class == NO_REGS)
922     /* Although we don't need an insn to reload from
923        memory, accessing memory is still usually more
924        expensive than a register.  */
925 pp->mem_cost = frequency;
926 else
927 /* If the alternative actually allows memory, make
928 things a bit cheaper since we won't need an
929 extra insn to load it. */
930 pp->mem_cost
931 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
932 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
933 - allows_mem[i]) * frequency;
934 /* If we have assigned a class to this allocno in
935 our first pass, add a cost to this alternative
936 corresponding to what we would add if this
937 allocno were not in the appropriate class. */
938 if (pref)
940 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
942 if (pref_class == NO_REGS)
944 if (op_class != NO_REGS)
945 alt_cost
946 += ((out_p
947 ? ira_memory_move_cost[mode][op_class][0]
948 : 0)
949 + (in_p
950 ? ira_memory_move_cost[mode][op_class][1]
951 : 0));
953 else if (op_class == NO_REGS)
954 alt_cost
955 += ((out_p
956 ? ira_memory_move_cost[mode][pref_class][1]
957 : 0)
958 + (in_p
959 ? ira_memory_move_cost[mode][pref_class][0]
960 : 0));
961 else if (ira_reg_class_intersect[pref_class][op_class]
962 == NO_REGS)
963 alt_cost += (ira_register_move_cost
964 [mode][pref_class][op_class]);
969 /* Otherwise, if this alternative wins, either because we
970 have already determined that or if we have a hard
971 register of the proper class, there is no cost for this
972 alternative. */
973 else if (win || (REG_P (op)
974 && reg_fits_class_p (op, classes[i],
975 0, GET_MODE (op))))
978 /* If registers are valid, the cost of this alternative
979 includes copying the object to and/or from a
980 register. */
981 else if (classes[i] != NO_REGS)
983 if (recog_data.operand_type[i] != OP_OUT)
984 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
986 if (recog_data.operand_type[i] != OP_IN)
987 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
989 /* The only other way this alternative can be used is if
990 this is a constant that could be placed into memory. */
991 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
992 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
993 else
994 alt_fail = 1;
997 if (alt_fail)
998 continue;
1000 op_cost_add = alt_cost * frequency;
1001 /* Finally, update the costs with the information we've
1002 calculated about this alternative. */
1003 for (i = 0; i < n_ops; i++)
1004 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1006 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1007 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1008 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1009 cost_classes_t cost_classes_ptr
1010 = regno_cost_classes[REGNO (ops[i])];
1012 pp->mem_cost = MIN (pp->mem_cost,
1013 (qq->mem_cost + op_cost_add) * scale);
1015 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1016 pp_costs[k]
1017 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1021 if (allocno_p)
1022 for (i = 0; i < n_ops; i++)
1024 ira_allocno_t a;
1025 rtx op = ops[i];
1027 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1028 continue;
1029 a = ira_curr_regno_allocno_map [REGNO (op)];
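      /* If no alternative of this insn lets the operand live in memory,
	 spilling its allocno is likely to be costly, so flag it as a bad
	 spill candidate below.  */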
1030 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1031 ALLOCNO_BAD_SPILL_P (a) = true;
1038 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1039 static inline bool
1040 ok_for_index_p_nonstrict (rtx reg)
1042 unsigned regno = REGNO (reg);
1044 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1047 /* A version of regno_ok_for_base_p for use here, when all
1048 pseudo-registers should count as OK. Arguments as for
1049 regno_ok_for_base_p. */
1050 static inline bool
1051 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1052 enum rtx_code outer_code, enum rtx_code index_code)
1054 unsigned regno = REGNO (reg);
1056 if (regno >= FIRST_PSEUDO_REGISTER)
1057 return true;
1058 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1061 /* Record the pseudo registers we must reload into hard registers in a
1062 subexpression of a memory address, X.
1064 If CONTEXT is 0, we are looking at the base part of an address,
1065 otherwise we are looking at the index part.
1067 MODE and AS are the mode and address space of the memory reference;
1068 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1069 These four arguments are passed down to base_reg_class.
1071 SCALE is twice the amount to multiply the cost by (it is twice so
1072 we can represent half-cost adjustments). */
1073 static void
1074 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1075 int context, enum rtx_code outer_code,
1076 enum rtx_code index_code, int scale)
1078 enum rtx_code code = GET_CODE (x);
1079 enum reg_class rclass;
1081 if (context == 1)
1082 rclass = INDEX_REG_CLASS;
1083 else
1084 rclass = base_reg_class (mode, as, outer_code, index_code);
1086 switch (code)
1088 case CONST_INT:
1089 case CONST:
1090 case CC0:
1091 case PC:
1092 case SYMBOL_REF:
1093 case LABEL_REF:
1094 return;
1096 case PLUS:
1097 /* When we have an address that is a sum, we must determine
1098 whether registers are "base" or "index" regs. If there is a
1099 sum of two registers, we must choose one to be the "base".
1100 Luckily, we can use the REG_POINTER to make a good choice
1101 most of the time. We only need to do this on machines that
1102 can have two registers in an address and where the base and
1103 index register classes are different.
1105 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1106 but that seems bogus since it should only be set when we are
1107 sure the register is being used as a pointer. */
1109 rtx arg0 = XEXP (x, 0);
1110 rtx arg1 = XEXP (x, 1);
1111 enum rtx_code code0 = GET_CODE (arg0);
1112 enum rtx_code code1 = GET_CODE (arg1);
1114 /* Look inside subregs. */
1115 if (code0 == SUBREG)
1116 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1117 if (code1 == SUBREG)
1118 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1120 /* If this machine only allows one register per address, it
1121 must be in the first operand. */
1122 if (MAX_REGS_PER_ADDRESS == 1)
1123 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1125 /* If index and base registers are the same on this machine,
1126 just record registers in any non-constant operands. We
1127 assume here, as well as in the tests below, that all
1128 addresses are in canonical form. */
1129 else if (INDEX_REG_CLASS
1130 == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1132 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1133 if (! CONSTANT_P (arg1))
1134 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1137 /* If the second operand is a constant integer, it doesn't
1138 change what class the first operand must be. */
1139 else if (CONST_SCALAR_INT_P (arg1))
1140 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1141 /* If the second operand is a symbolic constant, the first
1142 operand must be an index register. */
1143 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1144 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1145 /* If both operands are registers but one is already a hard
1146 register of index or reg-base class, give the other the
1147 class that the hard register is not. */
1148 else if (code0 == REG && code1 == REG
1149 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1150 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1151 || ok_for_index_p_nonstrict (arg0)))
1152 record_address_regs (mode, as, arg1,
1153 ok_for_base_p_nonstrict (arg0, mode, as,
1154 PLUS, REG) ? 1 : 0,
1155 PLUS, REG, scale);
1156 else if (code0 == REG && code1 == REG
1157 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1158 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1159 || ok_for_index_p_nonstrict (arg1)))
1160 record_address_regs (mode, as, arg0,
1161 ok_for_base_p_nonstrict (arg1, mode, as,
1162 PLUS, REG) ? 1 : 0,
1163 PLUS, REG, scale);
1164 /* If one operand is known to be a pointer, it must be the
1165 base with the other operand the index. Likewise if the
1166 other operand is a MULT. */
1167 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1169 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1170 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1172 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1174 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1175 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1177 /* Otherwise, count equal chances that each might be a base or
1178 index register. This case should be rare. */
1179 else
1181 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1182 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1183 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1184 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1187 break;
1189 /* Double the importance of an allocno that is incremented or
1190 decremented, since it would take two extra insns if it ends
1191 up in the wrong place. */
1192 case POST_MODIFY:
1193 case PRE_MODIFY:
1194 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1195 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1196 if (REG_P (XEXP (XEXP (x, 1), 1)))
1197 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1198 2 * scale);
1199 break;
1201 case POST_INC:
1202 case PRE_INC:
1203 case POST_DEC:
1204 case PRE_DEC:
1205 /* Double the importance of an allocno that is incremented or
1206 decremented, since it would take two extra insns if it ends
1207 up in the wrong place. */
1208 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1209 break;
1211 case REG:
1213 struct costs *pp;
1214 int *pp_costs;
1215 enum reg_class i;
1216 int k, regno, add_cost;
1217 cost_classes_t cost_classes_ptr;
1218 enum reg_class *cost_classes;
1219 move_table *move_in_cost;
1221 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1222 break;
1224 regno = REGNO (x);
1225 if (allocno_p)
1226 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1227 pp = COSTS (costs, COST_INDEX (regno));
1228 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
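	/* Accumulate the cost, saturating at INT_MAX so that repeated uses
	   of the register in addresses cannot overflow the counter; the
	   per-class loop below uses the same pattern.  */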
1229 if (INT_MAX - add_cost < pp->mem_cost)
1230 pp->mem_cost = INT_MAX;
1231 else
1232 pp->mem_cost += add_cost;
1233 cost_classes_ptr = regno_cost_classes[regno];
1234 cost_classes = cost_classes_ptr->classes;
1235 pp_costs = pp->cost;
1236 ira_init_register_move_cost_if_necessary (Pmode);
1237 move_in_cost = ira_may_move_in_cost[Pmode];
1238 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1240 i = cost_classes[k];
1241 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1242 if (INT_MAX - add_cost < pp_costs[k])
1243 pp_costs[k] = INT_MAX;
1244 else
1245 pp_costs[k] += add_cost;
1248 break;
1250 default:
1252 const char *fmt = GET_RTX_FORMAT (code);
1253 int i;
1254 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1255 if (fmt[i] == 'e')
1256 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1257 scale);
1264 /* Calculate the costs of insn operands. */
1265 static void
1266 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1268 const char *constraints[MAX_RECOG_OPERANDS];
1269 machine_mode modes[MAX_RECOG_OPERANDS];
1270 rtx ops[MAX_RECOG_OPERANDS];
1271 rtx set;
1272 int i;
1274 for (i = 0; i < recog_data.n_operands; i++)
1276 constraints[i] = recog_data.constraints[i];
1277 modes[i] = recog_data.operand_mode[i];
1280 /* If we get here, we are set up to record the costs of all the
1281 operands for this insn. Start by initializing the costs. Then
1282 handle any address registers. Finally record the desired classes
1283 for any allocnos, doing it twice if some pair of operands are
1284 commutative. */
1285 for (i = 0; i < recog_data.n_operands; i++)
1287 memcpy (op_costs[i], init_cost, struct_costs_size);
1289 ops[i] = recog_data.operand[i];
1290 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1291 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1293 if (MEM_P (recog_data.operand[i]))
1294 record_address_regs (GET_MODE (recog_data.operand[i]),
1295 MEM_ADDR_SPACE (recog_data.operand[i]),
1296 XEXP (recog_data.operand[i], 0),
1297 0, MEM, SCRATCH, frequency * 2);
1298 else if (constraints[i][0] == 'p'
1299 || (insn_extra_address_constraint
1300 (lookup_constraint (constraints[i]))))
1301 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1302 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1303 frequency * 2);
1306   /* Check for commutative operands in a separate loop so everything
1307      will have been initialized.  We must do this even if one operand is a
1308 constant--see addsi3 in m68k.md. */
1309 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1310 if (constraints[i][0] == '%')
1312 const char *xconstraints[MAX_RECOG_OPERANDS];
1313 int j;
1315 /* Handle commutative operands by swapping the constraints.
1316 We assume the modes are the same. */
1317 for (j = 0; j < recog_data.n_operands; j++)
1318 xconstraints[j] = constraints[j];
1320 xconstraints[i] = constraints[i+1];
1321 xconstraints[i+1] = constraints[i];
1322 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1323 recog_data.operand, modes,
1324 xconstraints, insn, pref);
1326 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1327 recog_data.operand, modes,
1328 constraints, insn, pref);
1330 /* If this insn is a single set copying operand 1 to operand 0 and
1331 one operand is an allocno with the other a hard reg or an allocno
1332 that prefers a hard register that is in its own register class
1333 then we may want to adjust the cost of that register class to -1.
1335 Avoid the adjustment if the source does not die to avoid
1336 stressing of register allocator by preferencing two colliding
1337 registers into single class.
1339 Also avoid the adjustment if a copy between hard registers of the
1340 class is expensive (ten times the cost of a default copy is
1341 considered arbitrarily expensive). This avoids losing when the
1342 preferred class is very expensive as the source of a copy
1343 instruction. */
1344 if ((set = single_set (insn)) != NULL_RTX
1345      /* In rare cases the single set insn might have fewer than 2
1346         operands, as the source can be a fixed special reg.  */
1347 && recog_data.n_operands > 1
1348 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1350 int regno, other_regno;
1351 rtx dest = SET_DEST (set);
1352 rtx src = SET_SRC (set);
1354 if (GET_CODE (dest) == SUBREG
1355 && (GET_MODE_SIZE (GET_MODE (dest))
1356 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1357 dest = SUBREG_REG (dest);
1358 if (GET_CODE (src) == SUBREG
1359 && (GET_MODE_SIZE (GET_MODE (src))
1360 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1361 src = SUBREG_REG (src);
1362 if (REG_P (src) && REG_P (dest)
1363 && find_regno_note (insn, REG_DEAD, REGNO (src))
1364 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1365 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1366 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1367 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1369 machine_mode mode = GET_MODE (src);
1370 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1371 enum reg_class *cost_classes = cost_classes_ptr->classes;
1372 reg_class_t rclass;
1373 int k, nr;
1375 i = regno == (int) REGNO (src) ? 1 : 0;
1376 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1378 rclass = cost_classes[k];
1379 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1380 && (reg_class_size[(int) rclass]
1381 == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1383 if (reg_class_size[rclass] == 1)
1384 op_costs[i]->cost[k] = -frequency;
1385 else
1387 for (nr = 0;
1388 nr < hard_regno_nregs[other_regno][mode];
1389 nr++)
1390 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
1391 other_regno + nr))
1392 break;
1394 if (nr == hard_regno_nregs[other_regno][mode])
1395 op_costs[i]->cost[k] = -frequency;
1405 /* Process one insn INSN. Scan it and record each time it would save
1406    code to put a certain allocno in a certain class.  Return the last
1407 insn processed, so that the scan can be continued from there. */
1408 static rtx_insn *
1409 scan_one_insn (rtx_insn *insn)
1411 enum rtx_code pat_code;
1412 rtx set, note;
1413 int i, k;
1414 bool counted_mem;
1416 if (!NONDEBUG_INSN_P (insn))
1417 return insn;
1419 pat_code = GET_CODE (PATTERN (insn));
1420 if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT)
1421 return insn;
1423 counted_mem = false;
1424 set = single_set (insn);
1425 extract_insn (insn);
1427 /* If this insn loads a parameter from its stack slot, then it
1428 represents a savings, rather than a cost, if the parameter is
1429 stored in memory. Record this fact.
1431 Similarly if we're loading other constants from memory (constant
1432 pool, TOC references, small data areas, etc) and this is the only
1433 assignment to the destination pseudo.
1435      Don't do this if SET_SRC (set) isn't a general operand: if it is
1436      a memory requiring special instructions to load it, decreasing
1437      mem_cost might result in it being loaded using the specialized
1438      instruction into a register, then stored into the stack and loaded
1439      again from the stack.  See PR52208.
1441 Don't do this if SET_SRC (set) has side effect. See PR56124. */
1442 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1443 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1444 && ((MEM_P (XEXP (note, 0))
1445 && !side_effects_p (SET_SRC (set)))
1446 || (CONSTANT_P (XEXP (note, 0))
1447 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1448 XEXP (note, 0))
1449 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1450 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set))))
1452 enum reg_class cl = GENERAL_REGS;
1453 rtx reg = SET_DEST (set);
1454 int num = COST_INDEX (REGNO (reg));
1456 COSTS (costs, num)->mem_cost
1457 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1458 record_address_regs (GET_MODE (SET_SRC (set)),
1459 MEM_ADDR_SPACE (SET_SRC (set)),
1460 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1461 frequency * 2);
1462 counted_mem = true;
1465 record_operand_costs (insn, pref);
1467 /* Now add the cost for each operand to the total costs for its
1468 allocno. */
1469 for (i = 0; i < recog_data.n_operands; i++)
1470 if (REG_P (recog_data.operand[i])
1471 && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1473 int regno = REGNO (recog_data.operand[i]);
1474 struct costs *p = COSTS (costs, COST_INDEX (regno));
1475 struct costs *q = op_costs[i];
1476 int *p_costs = p->cost, *q_costs = q->cost;
1477 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1478 int add_cost;
1480 	/* If we already accounted for the memory "cost" above, don't
1481 	   do so again.  */
1482 if (!counted_mem)
1484 add_cost = q->mem_cost;
1485 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1486 p->mem_cost = INT_MAX;
1487 else
1488 p->mem_cost += add_cost;
1490 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1492 add_cost = q_costs[k];
1493 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1494 p_costs[k] = INT_MAX;
1495 else
1496 p_costs[k] += add_cost;
1500 return insn;
1505 /* Print allocnos costs to file F. */
1506 static void
1507 print_allocno_costs (FILE *f)
1509 int k;
1510 ira_allocno_t a;
1511 ira_allocno_iterator ai;
1513 ira_assert (allocno_p);
1514 fprintf (f, "\n");
1515 FOR_EACH_ALLOCNO (a, ai)
1517 int i, rclass;
1518 basic_block bb;
1519 int regno = ALLOCNO_REGNO (a);
1520 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1521 enum reg_class *cost_classes = cost_classes_ptr->classes;
1523 i = ALLOCNO_NUM (a);
1524 fprintf (f, " a%d(r%d,", i, regno);
1525 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1526 fprintf (f, "b%d", bb->index);
1527 else
1528 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1529 fprintf (f, ") costs:");
1530 for (k = 0; k < cost_classes_ptr->num; k++)
1532 rclass = cost_classes[k];
1533 fprintf (f, " %s:%d", reg_class_names[rclass],
1534 COSTS (costs, i)->cost[k]);
1535 if (flag_ira_region == IRA_REGION_ALL
1536 || flag_ira_region == IRA_REGION_MIXED)
1537 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1539 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1540 if (flag_ira_region == IRA_REGION_ALL
1541 || flag_ira_region == IRA_REGION_MIXED)
1542 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1543 fprintf (f, "\n");
1547 /* Print pseudo costs to file F. */
1548 static void
1549 print_pseudo_costs (FILE *f)
1551 int regno, k;
1552 int rclass;
1553 cost_classes_t cost_classes_ptr;
1554 enum reg_class *cost_classes;
1556 ira_assert (! allocno_p);
1557 fprintf (f, "\n");
1558 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1560 if (REG_N_REFS (regno) <= 0)
1561 continue;
1562 cost_classes_ptr = regno_cost_classes[regno];
1563 cost_classes = cost_classes_ptr->classes;
1564 fprintf (f, " r%d costs:", regno);
1565 for (k = 0; k < cost_classes_ptr->num; k++)
1567 rclass = cost_classes[k];
1568 fprintf (f, " %s:%d", reg_class_names[rclass],
1569 COSTS (costs, regno)->cost[k]);
1571 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1575 /* Scan the insns of basic block BB to update the allocno or pseudo
1576    costs.  */
1577 static void
1578 process_bb_for_costs (basic_block bb)
1580 rtx_insn *insn;
1582 frequency = REG_FREQ_FROM_BB (bb);
1583 if (frequency == 0)
1584 frequency = 1;
1585 FOR_BB_INSNS (bb, insn)
1586 insn = scan_one_insn (insn);
1589 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1590 costs. */
1591 static void
1592 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1594 basic_block bb;
1596 bb = loop_tree_node->bb;
1597 if (bb != NULL)
1598 process_bb_for_costs (bb);
1601 /* Find costs of register classes and memory for allocnos or pseudos
1602 and their best costs. Set up preferred, alternative and allocno
1603 classes for pseudos. */
1604 static void
1605 find_costs_and_classes (FILE *dump_file)
1607 int i, k, start, max_cost_classes_num;
1608 int pass;
1609 basic_block bb;
1610 enum reg_class *regno_best_class, new_class;
1612 init_recog ();
1613 regno_best_class
1614 = (enum reg_class *) ira_allocate (max_reg_num ()
1615 * sizeof (enum reg_class));
1616 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1617 regno_best_class[i] = NO_REGS;
1618 if (!resize_reg_info () && allocno_p
1619 && pseudo_classes_defined_p && flag_expensive_optimizations)
1621 ira_allocno_t a;
1622 ira_allocno_iterator ai;
1624 pref = pref_buffer;
1625 max_cost_classes_num = 1;
1626 FOR_EACH_ALLOCNO (a, ai)
1628 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1629 setup_regno_cost_classes_by_aclass
1630 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1631 max_cost_classes_num
1632 = MAX (max_cost_classes_num,
1633 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1635 start = 1;
1637 else
1639 pref = NULL;
1640 max_cost_classes_num = ira_important_classes_num;
1641 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1642 if (regno_reg_rtx[i] != NULL_RTX)
1643 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1644 else
1645 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1646 start = 0;
1648 if (allocno_p)
1649 /* Clear the flag for the next compiled function. */
1650 pseudo_classes_defined_p = false;
1651 /* Normally we scan the insns once and determine the best class to
1652    use for each allocno.  However, if -fexpensive-optimizations is
1653 on, we do so twice, the second time using the tentative best
1654 classes to guide the selection. */
1655 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1657 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1658 fprintf (dump_file,
1659 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1661 if (pass != start)
1663 max_cost_classes_num = 1;
1664 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1666 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1667 max_cost_classes_num
1668 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1672 struct_costs_size
1673 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
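      /* One cost slot is already inside struct costs (the cost[1] member),
	 so only max_cost_classes_num - 1 extra ints are needed per
	 element.  */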
1674 /* Zero out our accumulation of the cost of each class for each
1675 allocno. */
1676 memset (costs, 0, cost_elements_num * struct_costs_size);
1678 if (allocno_p)
1680 /* Scan the instructions and record each time it would save code
1681 to put a certain allocno in a certain class. */
1682 ira_traverse_loop_tree (true, ira_loop_tree_root,
1683 process_bb_node_for_costs, NULL);
1685 memcpy (total_allocno_costs, costs,
1686 max_struct_costs_size * ira_allocnos_num);
1688 else
1690 basic_block bb;
1692 FOR_EACH_BB_FN (bb, cfun)
1693 process_bb_for_costs (bb);
1696 if (pass == 0)
1697 pref = pref_buffer;
1699 /* Now for each allocno look at how desirable each class is and
1700 find which class is preferred. */
1701 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1703 ira_allocno_t a, parent_a;
1704 int rclass, a_num, parent_a_num, add_cost;
1705 ira_loop_tree_node_t parent;
1706 int best_cost, allocno_cost;
1707 enum reg_class best, alt_class;
1708 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1709 enum reg_class *cost_classes = cost_classes_ptr->classes;
1710 int *i_costs = temp_costs->cost;
1711 int i_mem_cost;
1712 int equiv_savings = regno_equiv_gains[i];
1714 if (! allocno_p)
1716 if (regno_reg_rtx[i] == NULL_RTX)
1717 continue;
1718 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1719 i_mem_cost = temp_costs->mem_cost;
1721 else
1723 if (ira_regno_allocno_map[i] == NULL)
1724 continue;
1725 memset (temp_costs, 0, struct_costs_size);
1726 i_mem_cost = 0;
1727 /* Find cost of all allocnos with the same regno. */
1728 for (a = ira_regno_allocno_map[i];
1729 a != NULL;
1730 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1732 int *a_costs, *p_costs;
1734 a_num = ALLOCNO_NUM (a);
1735 if ((flag_ira_region == IRA_REGION_ALL
1736 || flag_ira_region == IRA_REGION_MIXED)
1737 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1738 && (parent_a = parent->regno_allocno_map[i]) != NULL
1739 /* There are no caps yet. */
1740 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1741 (a)->border_allocnos,
1742 ALLOCNO_NUM (a)))
1744 /* Propagate costs to upper levels in the region
1745 tree. */
1746 parent_a_num = ALLOCNO_NUM (parent_a);
1747 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1748 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1749 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1751 add_cost = a_costs[k];
1752 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1753 p_costs[k] = INT_MAX;
1754 else
1755 p_costs[k] += add_cost;
1757 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1758 if (add_cost > 0
1759 && (INT_MAX - add_cost
1760 < COSTS (total_allocno_costs,
1761 parent_a_num)->mem_cost))
1762 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1763 = INT_MAX;
1764 else
1765 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1766 += add_cost;
1768 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1769 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1771 a_costs = COSTS (costs, a_num)->cost;
1772 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1774 add_cost = a_costs[k];
1775 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1776 i_costs[k] = INT_MAX;
1777 else
1778 i_costs[k] += add_cost;
1780 add_cost = COSTS (costs, a_num)->mem_cost;
1781 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1782 i_mem_cost = INT_MAX;
1783 else
1784 i_mem_cost += add_cost;
1787 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1788 i_mem_cost = 0;
1789 else if (equiv_savings < 0)
1790 i_mem_cost = -equiv_savings;
1791 else if (equiv_savings > 0)
1793 i_mem_cost = 0;
1794 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1795 i_costs[k] += equiv_savings;
1798 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1799 best = ALL_REGS;
1800 alt_class = NO_REGS;
1801 /* Find best common class for all allocnos with the same
1802 regno. */
1803 for (k = 0; k < cost_classes_ptr->num; k++)
1805 rclass = cost_classes[k];
1806 if (i_costs[k] < best_cost)
1808 best_cost = i_costs[k];
1809 best = (enum reg_class) rclass;
1811 else if (i_costs[k] == best_cost)
1812 best = ira_reg_class_subunion[best][rclass];
1813 if (pass == flag_expensive_optimizations
1814 /* We still prefer registers to memory even at this
1815 stage if their costs are the same. We will make
1816 a final decision during assigning hard registers
1817 when we have all info including more accurate
1818 costs which might be affected by assigning hard
1819 registers to other pseudos because the pseudos
1820 involved in moves can be coalesced. */
1821 && i_costs[k] <= i_mem_cost
1822 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1823 > reg_class_size[alt_class]))
1824 alt_class = reg_class_subunion[alt_class][rclass];
1826 alt_class = ira_allocno_class_translate[alt_class];
1827 if (best_cost > i_mem_cost
1828 && ! non_spilled_static_chain_regno_p (i))
1829 regno_aclass[i] = NO_REGS;
1830 else if (!optimize && !targetm.class_likely_spilled_p (best))
1831 /* Registers in the alternative class are likely to need
1832 longer or slower sequences than registers in the best class.
1833 When optimizing we make some effort to use the best class
1834 over the alternative class where possible, but at -O0 we
1835 effectively give the alternative class equal weight.
1836 We then run the risk of using slower alternative registers
1837 when plenty of registers from the best class are still free.
1838 This is especially true because live ranges tend to be very
1839 short in -O0 code and so register pressure tends to be low.
1841 Avoid that by ignoring the alternative class if the best
1842 class has plenty of registers. */
1843 regno_aclass[i] = best;
1844 else
1846 /* Make the common class the smallest class containing
1847 both best and alt_class. */
1848 regno_aclass[i]
1849 = ira_reg_class_superunion[best][alt_class];
1850 ira_assert (regno_aclass[i] != NO_REGS
1851 && ira_reg_allocno_class_p[regno_aclass[i]]);
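/* Give the target a chance to override the chosen allocno class; if
   it does, narrow best and alt_class to the new class whenever they
   fully contain it. */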
1853 if ((new_class
1854 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1855 (i, regno_aclass[i]))) != regno_aclass[i])
1857 regno_aclass[i] = new_class;
1858 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1859 reg_class_contents[best]))
1860 best = new_class;
1861 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1862 reg_class_contents[alt_class]))
1863 alt_class = new_class;
1865 if (pass == flag_expensive_optimizations)
1867 if (best_cost > i_mem_cost
1868 /* Do not assign NO_REGS to static chain pointer
1869 pseudo when non-local goto is used. */
1870 && ! non_spilled_static_chain_regno_p (i))
1871 best = alt_class = NO_REGS;
1872 else if (best == alt_class)
1873 alt_class = NO_REGS;
1874 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1875 if ((!allocno_p || internal_flag_ira_verbose > 2)
1876 && dump_file != NULL)
1877 fprintf (dump_file,
1878 " r%d: preferred %s, alternative %s, allocno %s\n",
1879 i, reg_class_names[best], reg_class_names[alt_class],
1880 reg_class_names[regno_aclass[i]]);
1882 regno_best_class[i] = best;
1883 if (! allocno_p)
1885 pref[i] = (best_cost > i_mem_cost
1886 && ! non_spilled_static_chain_regno_p (i)
1887 ? NO_REGS : best);
1888 continue;
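/* In allocno mode, set the preferred class of each allocno of this
   regno to the cheapest subclass of the common class, using the
   costs accumulated over the whole region tree. */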
1890 for (a = ira_regno_allocno_map[i];
1891 a != NULL;
1892 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1894 enum reg_class aclass = regno_aclass[i];
1895 int a_num = ALLOCNO_NUM (a);
1896 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1897 int *a_costs = COSTS (costs, a_num)->cost;
1899 if (aclass == NO_REGS)
1900 best = NO_REGS;
1901 else
1903 /* Find the best class that is a subset of the common
1904 class. */
1905 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1906 allocno_cost = best_cost;
1907 best = ALL_REGS;
1908 for (k = 0; k < cost_classes_ptr->num; k++)
1910 rclass = cost_classes[k];
1911 if (! ira_class_subset_p[rclass][aclass])
1912 continue;
1913 if (total_a_costs[k] < best_cost)
1915 best_cost = total_a_costs[k];
1916 allocno_cost = a_costs[k];
1917 best = (enum reg_class) rclass;
1919 else if (total_a_costs[k] == best_cost)
1921 best = ira_reg_class_subunion[best][rclass];
1922 allocno_cost = MAX (allocno_cost, a_costs[k]);
1925 ALLOCNO_CLASS_COST (a) = allocno_cost;
1927 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1928 && (pass == 0 || pref[a_num] != best))
1930 fprintf (dump_file, " a%d (r%d,", a_num, i);
1931 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1932 fprintf (dump_file, "b%d", bb->index);
1933 else
1934 fprintf (dump_file, "l%d",
1935 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1936 fprintf (dump_file, ") best %s, allocno %s\n",
1937 reg_class_names[best],
1938 reg_class_names[aclass]);
1940 pref[a_num] = best;
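/* On the final pass, if the chosen class is so small that a value of
   this mode needs every register in it, record an explicit preference
   for its first hard register and copy the common-class cost to every
   subclass of it. */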
1941 if (pass == flag_expensive_optimizations && best != aclass
1942 && ira_class_hard_regs_num[best] > 0
1943 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1944 >= ira_class_hard_regs_num[best]))
1946 int ind = cost_classes_ptr->index[aclass];
1948 ira_assert (ind >= 0);
1949 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1950 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1951 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
1952 / (ira_register_move_cost
1953 [ALLOCNO_MODE (a)][best][aclass]));
1954 for (k = 0; k < cost_classes_ptr->num; k++)
1955 if (ira_class_subset_p[cost_classes[k]][best])
1956 a_costs[k] = a_costs[ind];
1961 if (internal_flag_ira_verbose > 4 && dump_file)
1963 if (allocno_p)
1964 print_allocno_costs (dump_file);
1965 else
1966 print_pseudo_costs (dump_file);
1967 fprintf (dump_file,"\n");
1970 ira_free (regno_best_class);
1975 /* Process moves involving hard regs to modify allocno hard register
1976 costs. We can do this only after determining allocno class. If a
1977 hard register forms a register class, then moves with the hard
1978 register are already taken into account in class costs for the
1979 allocno. */
1980 static void
1981 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
1983 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
1984 bool to_p;
1985 ira_allocno_t a, curr_a;
1986 ira_loop_tree_node_t curr_loop_tree_node;
1987 enum reg_class rclass;
1988 basic_block bb;
1989 rtx_insn *insn;
1990 rtx set, src, dst;
1992 bb = loop_tree_node->bb;
1993 if (bb == NULL)
1994 return;
1995 freq = REG_FREQ_FROM_BB (bb);
1996 if (freq == 0)
1997 freq = 1;
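/* Look through the block for single-set insns that copy between a
   pseudo and a hard register. */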
1998 FOR_BB_INSNS (bb, insn)
2000 if (!NONDEBUG_INSN_P (insn))
2001 continue;
2002 set = single_set (insn);
2003 if (set == NULL_RTX)
2004 continue;
2005 dst = SET_DEST (set);
2006 src = SET_SRC (set);
2007 if (! REG_P (dst) || ! REG_P (src))
2008 continue;
2009 dst_regno = REGNO (dst);
2010 src_regno = REGNO (src);
2011 if (dst_regno >= FIRST_PSEUDO_REGISTER
2012 && src_regno < FIRST_PSEUDO_REGISTER)
2014 hard_regno = src_regno;
2015 a = ira_curr_regno_allocno_map[dst_regno];
2016 to_p = true;
2018 else if (src_regno >= FIRST_PSEUDO_REGISTER
2019 && dst_regno < FIRST_PSEUDO_REGISTER)
2021 hard_regno = dst_regno;
2022 a = ira_curr_regno_allocno_map[src_regno];
2023 to_p = false;
2025 else
2026 continue;
2027 rclass = ALLOCNO_CLASS (a);
2028 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2029 continue;
2030 i = ira_class_hard_reg_index[rclass][hard_regno];
2031 if (i < 0)
2032 continue;
2033 a_regno = ALLOCNO_REGNO (a);
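/* Record the preference for this hard register on the allocno and on
   every ancestor allocno for the same regno in the loop tree. */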
2034 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2035 curr_loop_tree_node != NULL;
2036 curr_loop_tree_node = curr_loop_tree_node->parent)
2037 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2038 ira_add_allocno_pref (curr_a, hard_regno, freq);
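/* Make the hard register cheaper for the allocno itself: lower its
   hard register and conflict costs by the register move cost scaled
   by the block frequency. */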
2040 int cost;
2041 enum reg_class hard_reg_class;
2042 machine_mode mode;
2044 mode = ALLOCNO_MODE (a);
2045 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2046 ira_init_register_move_cost_if_necessary (mode);
2047 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2048 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2049 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2050 ALLOCNO_CLASS_COST (a));
2051 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2052 rclass, 0);
2053 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2054 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2055 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2056 ALLOCNO_HARD_REG_COSTS (a)[i]);
2061 /* After we find hard register and memory costs for allocnos, define
2062 the class of each allocno and adjust hard register costs to account
2063 for the insns that move an allocno to/from hard registers. */
2064 static void
2065 setup_allocno_class_and_costs (void)
2067 int i, j, n, regno, hard_regno, num;
2068 int *reg_costs;
2069 enum reg_class aclass, rclass;
2070 ira_allocno_t a;
2071 ira_allocno_iterator ai;
2072 cost_classes_t cost_classes_ptr;
2074 ira_assert (allocno_p);
2075 FOR_EACH_ALLOCNO (a, ai)
2077 i = ALLOCNO_NUM (a);
2078 regno = ALLOCNO_REGNO (a);
2079 aclass = regno_aclass[regno];
2080 cost_classes_ptr = regno_cost_classes[regno];
2081 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2082 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2083 ira_set_allocno_class (a, aclass);
2084 if (aclass == NO_REGS)
2085 continue;
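/* When optimizing and the preferred class differs from the allocno
   class, allocate a full hard register cost vector: registers in the
   preferred class keep the class cost, the rest get the cost recorded
   for their own class. */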
2086 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2088 n = ira_class_hard_regs_num[aclass];
2089 ALLOCNO_HARD_REG_COSTS (a)
2090 = reg_costs = ira_allocate_cost_vector (aclass);
2091 for (j = n - 1; j >= 0; j--)
2093 hard_regno = ira_class_hard_regs[aclass][j];
2094 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2095 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2096 else
2098 rclass = REGNO_REG_CLASS (hard_regno);
2099 num = cost_classes_ptr->index[rclass];
2100 if (num < 0)
2102 num = cost_classes_ptr->hard_regno_index[hard_regno];
2103 ira_assert (num >= 0);
2105 reg_costs[j] = COSTS (costs, i)->cost[num];
2110 if (optimize)
2111 ira_traverse_loop_tree (true, ira_loop_tree_root,
2112 process_bb_node_for_hard_reg_moves, NULL);
2117 /* Called once over the compiler's lifetime to clear the cost vector pointers. */
2118 void
2119 ira_init_costs_once (void)
2121 int i;
2123 init_cost = NULL;
2124 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2126 op_costs[i] = NULL;
2127 this_op_costs[i] = NULL;
2129 temp_costs = NULL;
2132 /* Free allocated temporary cost vectors. */
2133 void
2134 target_ira_int::free_ira_costs ()
2136 int i;
2138 free (x_init_cost);
2139 x_init_cost = NULL;
2140 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2142 free (x_op_costs[i]);
2143 free (x_this_op_costs[i]);
2144 x_op_costs[i] = x_this_op_costs[i] = NULL;
2146 free (x_temp_costs);
2147 x_temp_costs = NULL;
2150 /* This is called each time the register-related information is
2151 changed. */
2152 void
2153 ira_init_costs (void)
2155 int i;
2157 this_target_ira_int->free_ira_costs ();
2158 max_struct_costs_size
2159 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2160 /* Don't use ira_allocate because vectors live through several IRA
2161 calls. */
2162 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
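/* Use a very large default cost; the real costs computed from the
   insns replace these values. */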
2163 init_cost->mem_cost = 1000000;
2164 for (i = 0; i < ira_important_classes_num; i++)
2165 init_cost->cost[i] = 1000000;
2166 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2168 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2169 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2171 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2176 /* Common initialization function for ira_costs and
2177 ira_set_pseudo_classes. */
2178 static void
2179 init_costs (void)
2181 init_subregs_of_mode ();
2182 costs = (struct costs *) ira_allocate (max_struct_costs_size
2183 * cost_elements_num);
2184 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2185 * cost_elements_num);
2186 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2187 * max_reg_num ());
2188 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2189 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2192 /* Common finalization function for ira_costs and
2193 ira_set_pseudo_classes. */
2194 static void
2195 finish_costs (void)
2197 finish_subregs_of_mode ();
2198 ira_free (regno_equiv_gains);
2199 ira_free (regno_aclass);
2200 ira_free (pref_buffer);
2201 ira_free (costs);
2204 /* Entry function which defines the register class, memory cost, and
2205 hard register costs for each allocno. */
2206 void
2207 ira_costs (void)
2209 allocno_p = true;
2210 cost_elements_num = ira_allocnos_num;
2211 init_costs ();
2212 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2213 * ira_allocnos_num);
2214 initiate_regno_cost_classes ();
2215 calculate_elim_costs_all_insns ();
2216 find_costs_and_classes (ira_dump_file);
2217 setup_allocno_class_and_costs ();
2218 finish_regno_cost_classes ();
2219 finish_costs ();
2220 ira_free (total_allocno_costs);
2223 /* Entry function which defines classes for pseudos.
2224 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2225 void
2226 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2228 allocno_p = false;
2229 internal_flag_ira_verbose = flag_ira_verbose;
2230 cost_elements_num = max_reg_num ();
2231 init_costs ();
2232 initiate_regno_cost_classes ();
2233 find_costs_and_classes (dump_file);
2234 finish_regno_cost_classes ();
2235 if (define_pseudo_classes)
2236 pseudo_classes_defined_p = true;
2238 finish_costs ();
2243 /* Change hard register costs for allocnos that live through
2244 function calls. This is called only after all the calls crossed by
2245 each allocno have been found while building allocno live ranges. */
2246 void
2247 ira_tune_allocno_costs (void)
2249 int j, n, regno;
2250 int cost, min_cost, *reg_costs;
2251 enum reg_class aclass, rclass;
2252 machine_mode mode;
2253 ira_allocno_t a;
2254 ira_allocno_iterator ai;
2255 ira_allocno_object_iterator oi;
2256 ira_object_t obj;
2257 bool skip_p;
2258 HARD_REG_SET *crossed_calls_clobber_regs;
2260 FOR_EACH_ALLOCNO (a, ai)
2262 aclass = ALLOCNO_CLASS (a);
2263 if (aclass == NO_REGS)
2264 continue;
2265 mode = ALLOCNO_MODE (a);
2266 n = ira_class_hard_regs_num[aclass];
2267 min_cost = INT_MAX;
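/* Adjust hard register costs only if the allocno crosses at least
   one call that is not counted as cheap. */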
2268 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2269 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2271 ira_allocate_and_set_costs
2272 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2273 ALLOCNO_CLASS_COST (a));
2274 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2275 for (j = n - 1; j >= 0; j--)
2277 regno = ira_class_hard_regs[aclass][j];
2278 skip_p = false;
2279 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2281 if (ira_hard_reg_set_intersection_p (regno, mode,
2282 OBJECT_CONFLICT_HARD_REGS
2283 (obj)))
2285 skip_p = true;
2286 break;
2289 if (skip_p)
2290 continue;
2291 rclass = REGNO_REG_CLASS (regno);
2292 cost = 0;
2293 crossed_calls_clobber_regs
2294 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
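/* If the register is clobbered by a call the allocno crosses, add
   the cost of saving and restoring it around those calls (memory
   store plus load, scaled by the call frequency). */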
2295 if (ira_hard_reg_set_intersection_p (regno, mode,
2296 *crossed_calls_clobber_regs)
2297 && (ira_hard_reg_set_intersection_p (regno, mode,
2298 call_used_reg_set)
2299 || HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
2300 cost += (ALLOCNO_CALL_FREQ (a)
2301 * (ira_memory_move_cost[mode][rclass][0]
2302 + ira_memory_move_cost[mode][rclass][1]));
2303 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2304 cost += ((ira_memory_move_cost[mode][rclass][0]
2305 + ira_memory_move_cost[mode][rclass][1])
2306 * ALLOCNO_FREQ (a)
2307 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2308 #endif
2309 if (INT_MAX - cost < reg_costs[j])
2310 reg_costs[j] = INT_MAX;
2311 else
2312 reg_costs[j] += cost;
2313 if (min_cost > reg_costs[j])
2314 min_cost = reg_costs[j];
2317 if (min_cost != INT_MAX)
2318 ALLOCNO_CLASS_COST (a) = min_cost;
2320 /* Some targets allow pseudos to be allocated to unaligned sequences
2321 of hard registers. However, selecting an unaligned sequence can
2322 unnecessarily restrict later allocations. So increase the cost of
2323 unaligned hard regs to encourage the use of aligned hard regs. */
2325 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2327 if (nregs > 1)
2329 ira_allocate_and_set_costs
2330 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2331 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2332 for (j = n - 1; j >= 0; j--)
2334 regno = ira_non_ordered_class_hard_regs[aclass][j];
2335 if ((regno % nregs) != 0)
2337 int index = ira_class_hard_reg_index[aclass][regno];
2338 ira_assert (index != -1);
2339 reg_costs[index] += ALLOCNO_FREQ (a);
2347 /* Add COST to the estimated gain for eliminating REGNO with its
2348 equivalence. If COST is zero, record that no such elimination is
2349 possible. */
2351 void
2352 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2354 if (cost == 0)
2355 regno_equiv_gains[regno] = 0;
2356 else
2357 regno_equiv_gains[regno] += cost;
2360 void
2361 ira_costs_c_finalize (void)
2363 this_target_ira_int->free_ira_costs ();