[official-gcc.git] / gcc / ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2016 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
38 /* This flag is set up every time we calculate pseudo register
39 classes through the function ira_set_pseudo_classes. */
40 static bool pseudo_classes_defined_p = false;
42 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
43 static bool allocno_p;
45 /* Number of elements in array `costs'. */
46 static int cost_elements_num;
48 /* The `costs' struct records the cost of using hard registers of each
49 class considered for the calculation and of using memory for each
50 allocno or pseudo. */
51 struct costs
53 int mem_cost;
54 /* Costs for register classes start here. We process only some
55 allocno classes. */
56 int cost[1];
59 #define max_struct_costs_size \
60 (this_target_ira_int->x_max_struct_costs_size)
61 #define init_cost \
62 (this_target_ira_int->x_init_cost)
63 #define temp_costs \
64 (this_target_ira_int->x_temp_costs)
65 #define op_costs \
66 (this_target_ira_int->x_op_costs)
67 #define this_op_costs \
68 (this_target_ira_int->x_this_op_costs)
70 /* Costs of each class for each allocno or pseudo. */
71 static struct costs *costs;
73 /* Accumulated costs of each class for each allocno. */
74 static struct costs *total_allocno_costs;
76 /* The current size of struct costs. */
77 static int struct_costs_size;
79 /* Return pointer to structure containing costs of allocno or pseudo
80 with given NUM in array ARR. */
81 #define COSTS(arr, num) \
82 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
84 /* Return index in COSTS when processing reg with REGNO. */
85 #define COST_INDEX(regno) (allocno_p \
86 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
87 : (int) regno)
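/* A minimal sketch of the variable-length record layout behind `costs'
   and the COSTS macro above, using the hypothetical names
   example_costs_buf, example_alloc_costs and example_nth_costs: each
   record is a struct costs whose trailing cost[] array really holds one
   entry per cost class, so records are located by byte offset
   (struct_costs_size in the real code) rather than by ordinary array
   indexing.  */
static struct costs *example_costs_buf;

/* Allocate and zero N_RECORDS records, each with room for N_CLASSES
   class costs; this mirrors how struct_costs_size is computed in
   find_costs_and_classes.  */
static void
example_alloc_costs (int n_records, int n_classes)
{
  size_t record_size
    = sizeof (struct costs) + sizeof (int) * (n_classes - 1);
  example_costs_buf = (struct costs *) ira_allocate (n_records * record_size);
  memset (example_costs_buf, 0, n_records * record_size);
}

/* Analogue of the COSTS macro: record NUM starts NUM * record-size
   bytes into the buffer.  */
static struct costs *
example_nth_costs (int num, int n_classes)
{
  size_t record_size
    = sizeof (struct costs) + sizeof (int) * (n_classes - 1);
  return (struct costs *) ((char *) example_costs_buf + num * record_size);
}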
89 /* Record register class preferences of each allocno or pseudo. A null
90 value means no preferences; this happens on the first iteration of the
91 cost calculation. */
92 static enum reg_class *pref;
94 /* Allocated buffers for pref. */
95 static enum reg_class *pref_buffer;
97 /* Record allocno class of each allocno with the same regno. */
98 static enum reg_class *regno_aclass;
100 /* Record cost gains for not allocating a register with an invariant
101 equivalence. */
102 static int *regno_equiv_gains;
104 /* Execution frequency of the current insn. */
105 static int frequency;
109 /* Info about reg classes whose costs are calculated for a pseudo. */
110 struct cost_classes
112 /* Number of the cost classes in the subsequent array. */
113 int num;
114 /* Container of the cost classes. */
115 enum reg_class classes[N_REG_CLASSES];
116 /* Map reg class -> index of the reg class in the previous array.
117 -1 if it is not a cost class. */
118 int index[N_REG_CLASSES];
119 /* Map hard regno -> index of the first class in array CLASSES containing
120 the hard regno, -1 otherwise. */
121 int hard_regno_index[FIRST_PSEUDO_REGISTER];
124 /* Types of pointers to the structure above. */
125 typedef struct cost_classes *cost_classes_t;
126 typedef const struct cost_classes *const_cost_classes_t;
128 /* Info about cost classes for each pseudo. */
129 static cost_classes_t *regno_cost_classes;
131 /* Helper for cost_classes hashing. */
133 struct cost_classes_hasher : pointer_hash <cost_classes>
135 static inline hashval_t hash (const cost_classes *);
136 static inline bool equal (const cost_classes *, const cost_classes *);
137 static inline void remove (cost_classes *);
140 /* Returns hash value for cost classes info HV. */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
144 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
147 /* Compares cost classes info HV1 and HV2. */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
151 return (hv1->num == hv2->num
152 && memcmp (hv1->classes, hv2->classes,
153 sizeof (enum reg_class) * hv1->num) == 0);
156 /* Delete cost classes info V from the hash table. */
157 inline void
158 cost_classes_hasher::remove (cost_classes *v)
160 ira_free (v);
163 /* Hash table of unique cost classes. */
164 static hash_table<cost_classes_hasher> *cost_classes_htab;
166 /* Map allocno class -> cost classes for pseudo of given allocno
167 class. */
168 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
170 /* Map mode -> cost classes for pseudo of given mode. */
171 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
173 /* Cost classes that include all classes in ira_important_classes. */
174 static cost_classes all_cost_classes;
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177 the structure. */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
181 for (int i = 0; i < N_REG_CLASSES; i++)
182 classes_ptr->index[i] = -1;
183 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184 classes_ptr->hard_regno_index[i] = -1;
185 for (int i = 0; i < classes_ptr->num; i++)
187 enum reg_class cl = classes_ptr->classes[i];
188 classes_ptr->index[cl] = i;
189 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
191 unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 classes_ptr->hard_regno_index[hard_regno] = i;
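/* A small usage sketch of the two reverse maps filled in above, with
   the hypothetical helper names example_class_cost_index and
   example_first_class_of_regno: index[] answers "at which position is
   class CL tracked?" and hard_regno_index[] answers "which tracked
   class first contains hard register HARD_REGNO?", both as single
   table lookups.  */
static int
example_class_cost_index (const_cost_classes_t classes_ptr,
                          enum reg_class cl)
{
  /* -1 means CL is not among the cost classes of this record.  */
  return classes_ptr->index[cl];
}

static int
example_first_class_of_regno (const_cost_classes_t classes_ptr,
                              unsigned int hard_regno)
{
  /* -1 means no cost class of this record contains HARD_REGNO.  */
  return classes_ptr->hard_regno_index[hard_regno];
}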
198 /* Initialize info about the cost classes for each pseudo. */
199 static void
200 initiate_regno_cost_classes (void)
202 int size = sizeof (cost_classes_t) * max_reg_num ();
204 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
205 memset (regno_cost_classes, 0, size);
206 memset (cost_classes_aclass_cache, 0,
207 sizeof (cost_classes_t) * N_REG_CLASSES);
208 memset (cost_classes_mode_cache, 0,
209 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
210 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
211 all_cost_classes.num = ira_important_classes_num;
212 for (int i = 0; i < ira_important_classes_num; i++)
213 all_cost_classes.classes[i] = ira_important_classes[i];
214 complete_cost_classes (&all_cost_classes);
217 /* Create new cost classes from cost classes FROM and set up members
218 index and hard_regno_index. Return the new classes. The function
219 implements code common to the two functions
220 setup_regno_cost_classes_by_aclass and
221 setup_regno_cost_classes_by_mode. */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
225 cost_classes_t classes_ptr;
227 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228 classes_ptr->num = from->num;
229 for (int i = 0; i < from->num; i++)
230 classes_ptr->classes[i] = from->classes[i];
231 complete_cost_classes (classes_ptr);
232 return classes_ptr;
235 /* Return a version of FULL that only considers registers in REGS that are
236 valid for mode MODE. Both FULL and the returned class are globally
237 allocated. */
238 static cost_classes_t
239 restrict_cost_classes (cost_classes_t full, machine_mode mode,
240 const HARD_REG_SET &regs)
242 static struct cost_classes narrow;
243 int map[N_REG_CLASSES];
244 narrow.num = 0;
245 for (int i = 0; i < full->num; i++)
247 /* Assume that we'll drop the class. */
248 map[i] = -1;
250 /* Ignore classes that are too small for the mode. */
251 enum reg_class cl = full->classes[i];
252 if (!contains_reg_of_mode[cl][mode])
253 continue;
255 /* Calculate the set of registers in CL that belong to REGS and
256 are valid for MODE. */
257 HARD_REG_SET valid_for_cl;
258 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
259 AND_HARD_REG_SET (valid_for_cl, regs);
260 AND_COMPL_HARD_REG_SET (valid_for_cl,
261 ira_prohibited_class_mode_regs[cl][mode]);
262 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
263 if (hard_reg_set_empty_p (valid_for_cl))
264 continue;
266 /* Don't use this class if the set of valid registers is a subset
267 of an existing class. For example, suppose we have two classes
268 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
269 that the mode changes allowed by FR_REGS are not as general as
270 the mode changes allowed by GR_REGS.
272 In this situation, the mode changes for GR_AND_FR_REGS could
273 either be seen as the union or the intersection of the mode
274 changes allowed by the two subclasses. The justification for
275 the union-based definition would be that, if you want a mode
276 change that's only allowed by GR_REGS, you can pick a register
277 from the GR_REGS subclass. The justification for the
278 intersection-based definition would be that every register
279 from the class would allow the mode change.
281 However, if we have a register that needs to be in GR_REGS,
282 using GR_AND_FR_REGS with the intersection-based definition
283 would be too pessimistic, since it would bring in restrictions
284 that only apply to FR_REGS. Conversely, if we have a register
285 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
286 union-based definition would lose the extra restrictions
287 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
288 for cases where GR_REGS and FR_REGS are both valid. */
289 int pos;
290 for (pos = 0; pos < narrow.num; ++pos)
292 enum reg_class cl2 = narrow.classes[pos];
293 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
294 break;
296 map[i] = pos;
297 if (pos == narrow.num)
299 /* If several classes are equivalent, prefer to use the one
300 that was chosen as the allocno class. */
301 enum reg_class cl2 = ira_allocno_class_translate[cl];
302 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
303 cl = cl2;
304 narrow.classes[narrow.num++] = cl;
307 if (narrow.num == full->num)
308 return full;
310 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
311 if (*slot == NULL)
313 cost_classes_t classes = setup_cost_classes (&narrow);
314 /* Map equivalent classes to the representative that we chose above. */
315 for (int i = 0; i < ira_important_classes_num; i++)
317 enum reg_class cl = ira_important_classes[i];
318 int index = full->index[cl];
319 if (index >= 0)
320 classes->index[cl] = map[index];
322 *slot = classes;
324 return *slot;
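/* A minimal sketch of the subset filtering performed above, assuming
   plain bitmasks instead of HARD_REG_SETs, at most 32 candidates, and
   the hypothetical name example_filter_subsets: a candidate whose set
   of valid registers is contained in the set of an already kept
   candidate adds no new allocation choices, so it is mapped onto the
   earlier candidate instead of being kept separately.  */
static int
example_filter_subsets (const unsigned int *valid, int n, int *map)
{
  unsigned int kept[32];
  int n_kept = 0;

  for (int i = 0; i < n; i++)
    {
      int pos;
      /* Find the first kept candidate whose set covers this one.  */
      for (pos = 0; pos < n_kept; pos++)
        if ((valid[i] & ~kept[pos]) == 0)
          break;
      map[i] = pos;
      if (pos == n_kept)
        kept[n_kept++] = valid[i];
    }
  return n_kept;
}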
327 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
328 This function is used when we already know an initial approximation of
329 the allocno class of the pseudo, e.g. on the second iteration
330 of class cost calculation or after class cost calculation in
331 register-pressure sensitive insn scheduling or register-pressure
332 sensitive loop-invariant motion. */
333 static void
334 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
336 static struct cost_classes classes;
337 cost_classes_t classes_ptr;
338 enum reg_class cl;
339 int i;
340 cost_classes **slot;
341 HARD_REG_SET temp, temp2;
342 bool exclude_p;
344 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
346 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
347 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
348 /* We exclude from consideration classes which are subsets of
349 ACLASS only if ACLASS is a uniform class. */
350 exclude_p = ira_uniform_class_p[aclass];
351 classes.num = 0;
352 for (i = 0; i < ira_important_classes_num; i++)
354 cl = ira_important_classes[i];
355 if (exclude_p)
357 /* Exclude non-uniform classes which are subsets of
358 ACLASS. */
359 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
360 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
361 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
362 continue;
364 classes.classes[classes.num++] = cl;
366 slot = cost_classes_htab->find_slot (&classes, INSERT);
367 if (*slot == NULL)
369 classes_ptr = setup_cost_classes (&classes);
370 *slot = classes_ptr;
372 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
374 if (regno_reg_rtx[regno] != NULL_RTX)
376 /* Restrict the classes to those that are valid for REGNO's mode
377 (which might for example exclude singleton classes if the mode
378 requires two registers). Also restrict the classes to those that
379 are valid for subregs of REGNO. */
380 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
381 if (!valid_regs)
382 valid_regs = &reg_class_contents[ALL_REGS];
383 classes_ptr = restrict_cost_classes (classes_ptr,
384 PSEUDO_REGNO_MODE (regno),
385 *valid_regs);
387 regno_cost_classes[regno] = classes_ptr;
390 /* Set up cost classes for pseudo REGNO with MODE. Using MODE can
391 decrease the number of cost classes for the pseudo if hard registers
392 of some important classes cannot hold a value of MODE. In that case
393 the pseudo cannot get a hard register of those classes, and computing
394 costs for such important classes would only waste CPU
395 time. */
396 static void
397 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
399 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
400 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
401 mode, *valid_regs);
402 else
404 if (cost_classes_mode_cache[mode] == NULL)
405 cost_classes_mode_cache[mode]
406 = restrict_cost_classes (&all_cost_classes, mode,
407 reg_class_contents[ALL_REGS]);
408 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
412 /* Finalize info about the cost classes for each pseudo. */
413 static void
414 finish_regno_cost_classes (void)
416 ira_free (regno_cost_classes);
417 delete cost_classes_htab;
418 cost_classes_htab = NULL;
423 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
424 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
425 be a pseudo register. */
426 static int
427 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
428 secondary_reload_info *prev_sri)
430 secondary_reload_info sri;
431 reg_class_t secondary_class = NO_REGS;
433 /* If X is a SCRATCH, there is actually nothing to move since we are
434 assuming optimal allocation. */
435 if (GET_CODE (x) == SCRATCH)
436 return 0;
438 /* Get the class we will actually use for a reload. */
439 rclass = targetm.preferred_reload_class (x, rclass);
441 /* If we need a secondary reload through an intermediate register, the
442 cost is that of loading the input into the intermediate register and
443 then copying it. */
444 sri.prev_sri = prev_sri;
445 sri.extra_cost = 0;
446 /* PR 68770: Secondary reload might examine the t_icode field. */
447 sri.t_icode = CODE_FOR_nothing;
449 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
451 if (secondary_class != NO_REGS)
453 ira_init_register_move_cost_if_necessary (mode);
454 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
455 + sri.extra_cost
456 + copy_cost (x, mode, secondary_class, to_p, &sri));
459 /* For memory, use the memory move cost, for (hard) registers, use
460 the cost to move between the register classes, and use 2 for
461 everything else (constants). */
462 if (MEM_P (x) || rclass == NO_REGS)
463 return sri.extra_cost
464 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
465 else if (REG_P (x))
467 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
469 ira_init_register_move_cost_if_necessary (mode);
470 return (sri.extra_cost
471 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
473 else
474 /* If this is a constant, we may eventually want to call rtx_cost
475 here. */
476 return sri.extra_cost + COSTS_N_INSNS (1);
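/* A minimal sketch of the recursion above on a hypothetical machine
   with two register classes (the names example_copy_cost, secondary_of
   and move_cost are invented): when getting X into RCLASS requires an
   intermediate class, the total cost is the move from the intermediate
   into RCLASS plus, recursively, the cost of getting X into the
   intermediate (any target-supplied extra cost is omitted here).  */
static int
example_copy_cost (int rclass, int depth)
{
  /* Intermediate class needed to reach each class (-1 if none) and the
     cost of the final move into it.  */
  static const int secondary_of[2] = { -1, 0 };
  static const int move_cost[2] = { 2, 4 };

  if (depth > 8 || secondary_of[rclass] < 0)
    return move_cost[rclass];
  return move_cost[rclass]
         + example_copy_cost (secondary_of[rclass], depth + 1);
}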
481 /* Record the cost of using memory or hard registers of various
482 classes for the operands in INSN.
484 N_ALTS is the number of alternatives.
485 N_OPS is the number of operands.
486 OPS is an array of the operands.
487 MODES are the modes of the operands, in case any are VOIDmode.
488 CONSTRAINTS are the constraints to use for the operands. This array
489 is modified by this procedure.
491 This procedure works alternative by alternative. For each
492 alternative we assume that we will be able to allocate all allocnos
493 to their ideal register class and calculate the cost of using that
494 alternative. Then we compute, for each operand that is a
495 pseudo-register, the cost of having the allocno allocated to each
496 register class and using it in that alternative. To this cost is
497 added the cost of the alternative.
499 The cost of each class for this insn is its lowest cost among all
500 the alternatives. */
501 static void
502 record_reg_classes (int n_alts, int n_ops, rtx *ops,
503 machine_mode *modes, const char **constraints,
504 rtx_insn *insn, enum reg_class *pref)
506 int alt;
507 int i, j, k;
508 int insn_allows_mem[MAX_RECOG_OPERANDS];
509 move_table *move_in_cost, *move_out_cost;
510 short (*mem_cost)[2];
512 for (i = 0; i < n_ops; i++)
513 insn_allows_mem[i] = 0;
515 /* Process each alternative, each time minimizing an operand's cost
516 with the cost for each operand in that alternative. */
517 alternative_mask preferred = get_preferred_alternatives (insn);
518 for (alt = 0; alt < n_alts; alt++)
520 enum reg_class classes[MAX_RECOG_OPERANDS];
521 int allows_mem[MAX_RECOG_OPERANDS];
522 enum reg_class rclass;
523 int alt_fail = 0;
524 int alt_cost = 0, op_cost_add;
526 if (!TEST_BIT (preferred, alt))
528 for (i = 0; i < recog_data.n_operands; i++)
529 constraints[i] = skip_alternative (constraints[i]);
531 continue;
534 for (i = 0; i < n_ops; i++)
536 unsigned char c;
537 const char *p = constraints[i];
538 rtx op = ops[i];
539 machine_mode mode = modes[i];
540 int allows_addr = 0;
541 int win = 0;
543 /* Initially show we know nothing about the register class. */
544 classes[i] = NO_REGS;
545 allows_mem[i] = 0;
547 /* If this operand has no constraints at all, we can
548 conclude nothing about it since anything is valid. */
549 if (*p == 0)
551 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
552 memset (this_op_costs[i], 0, struct_costs_size);
553 continue;
556 /* If this alternative is only relevant when this operand
557 matches a previous operand, we do different things
558 depending on whether this operand is an allocno-reg or not.
559 We must process any modifiers for the operand before we
560 can make this test. */
561 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
562 p++;
564 if (p[0] >= '0' && p[0] <= '0' + i)
566 /* Copy class and whether memory is allowed from the
567 matching alternative. Then perform any needed cost
568 computations and/or adjustments. */
569 j = p[0] - '0';
570 classes[i] = classes[j];
571 allows_mem[i] = allows_mem[j];
572 if (allows_mem[i])
573 insn_allows_mem[i] = 1;
575 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
577 /* If this matches the other operand, we have no
578 added cost and we win. */
579 if (rtx_equal_p (ops[j], op))
580 win = 1;
581 /* If we can put the other operand into a register,
582 add to the cost of this alternative the cost to
583 copy this operand to the register used for the
584 other operand. */
585 else if (classes[j] != NO_REGS)
587 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
588 win = 1;
591 else if (! REG_P (ops[j])
592 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
594 /* This op is an allocno but the one it matches is
595 not. */
597 /* If we can't put the other operand into a
598 register, this alternative can't be used. */
600 if (classes[j] == NO_REGS)
601 alt_fail = 1;
602 /* Otherwise, add to the cost of this alternative
603 the cost to copy the other operand to the hard
604 register used for this operand. */
605 else
606 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
608 else
610 /* The costs of this operand are not the same as the
611 other operand since move costs are not symmetric.
612 Moreover, if we cannot tie them, this alternative
613 needs to do a copy, which is one insn. */
614 struct costs *pp = this_op_costs[i];
615 int *pp_costs = pp->cost;
616 cost_classes_t cost_classes_ptr
617 = regno_cost_classes[REGNO (op)];
618 enum reg_class *cost_classes = cost_classes_ptr->classes;
619 bool in_p = recog_data.operand_type[i] != OP_OUT;
620 bool out_p = recog_data.operand_type[i] != OP_IN;
621 enum reg_class op_class = classes[i];
623 ira_init_register_move_cost_if_necessary (mode);
624 if (! in_p)
626 ira_assert (out_p);
627 if (op_class == NO_REGS)
629 mem_cost = ira_memory_move_cost[mode];
630 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
632 rclass = cost_classes[k];
633 pp_costs[k] = mem_cost[rclass][0] * frequency;
636 else
638 move_out_cost = ira_may_move_out_cost[mode];
639 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
641 rclass = cost_classes[k];
642 pp_costs[k]
643 = move_out_cost[op_class][rclass] * frequency;
647 else if (! out_p)
649 ira_assert (in_p);
650 if (op_class == NO_REGS)
652 mem_cost = ira_memory_move_cost[mode];
653 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
655 rclass = cost_classes[k];
656 pp_costs[k] = mem_cost[rclass][1] * frequency;
659 else
661 move_in_cost = ira_may_move_in_cost[mode];
662 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
664 rclass = cost_classes[k];
665 pp_costs[k]
666 = move_in_cost[rclass][op_class] * frequency;
670 else
672 if (op_class == NO_REGS)
674 mem_cost = ira_memory_move_cost[mode];
675 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
677 rclass = cost_classes[k];
678 pp_costs[k] = ((mem_cost[rclass][0]
679 + mem_cost[rclass][1])
680 * frequency);
683 else
685 move_in_cost = ira_may_move_in_cost[mode];
686 move_out_cost = ira_may_move_out_cost[mode];
687 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
689 rclass = cost_classes[k];
690 pp_costs[k] = ((move_in_cost[rclass][op_class]
691 + move_out_cost[op_class][rclass])
692 * frequency);
697 /* If the alternative actually allows memory, make
698 things a bit cheaper since we won't need an extra
699 insn to load it. */
700 pp->mem_cost
701 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
702 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
703 - allows_mem[i]) * frequency;
705 /* If we have assigned a class to this allocno in
706 our first pass, add a cost to this alternative
707 corresponding to what we would add if this
708 allocno were not in the appropriate class. */
709 if (pref)
711 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
713 if (pref_class == NO_REGS)
714 alt_cost
715 += ((out_p
716 ? ira_memory_move_cost[mode][op_class][0] : 0)
717 + (in_p
718 ? ira_memory_move_cost[mode][op_class][1]
719 : 0));
720 else if (ira_reg_class_intersect
721 [pref_class][op_class] == NO_REGS)
722 alt_cost
723 += ira_register_move_cost[mode][pref_class][op_class];
725 if (REGNO (ops[i]) != REGNO (ops[j])
726 && ! find_reg_note (insn, REG_DEAD, op))
727 alt_cost += 2;
729 p++;
733 /* Scan all the constraint letters. See if the operand
734 matches any of the constraints. Collect the valid
735 register classes and see if this operand accepts
736 memory. */
737 while ((c = *p))
739 switch (c)
741 case '*':
742 /* Ignore the next letter for this pass. */
743 c = *++p;
744 break;
746 case '^':
747 alt_cost += 2;
748 break;
750 case '?':
751 alt_cost += 2;
752 break;
754 case 'g':
755 if (MEM_P (op)
756 || (CONSTANT_P (op)
757 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
758 win = 1;
759 insn_allows_mem[i] = allows_mem[i] = 1;
760 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
761 break;
763 default:
764 enum constraint_num cn = lookup_constraint (p);
765 enum reg_class cl;
766 switch (get_constraint_type (cn))
768 case CT_REGISTER:
769 cl = reg_class_for_constraint (cn);
770 if (cl != NO_REGS)
771 classes[i] = ira_reg_class_subunion[classes[i]][cl];
772 break;
774 case CT_CONST_INT:
775 if (CONST_INT_P (op)
776 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
777 win = 1;
778 break;
780 case CT_MEMORY:
781 /* Every MEM can be reloaded to fit. */
782 insn_allows_mem[i] = allows_mem[i] = 1;
783 if (MEM_P (op))
784 win = 1;
785 break;
787 case CT_SPECIAL_MEMORY:
788 insn_allows_mem[i] = allows_mem[i] = 1;
789 if (MEM_P (op) && constraint_satisfied_p (op, cn))
790 win = 1;
791 break;
793 case CT_ADDRESS:
794 /* Every address can be reloaded to fit. */
795 allows_addr = 1;
796 if (address_operand (op, GET_MODE (op))
797 || constraint_satisfied_p (op, cn))
798 win = 1;
799 /* We know this operand is an address, so we
800 want it to be allocated to a hard register
801 that can be the base of an address,
802 i.e. BASE_REG_CLASS. */
803 classes[i]
804 = ira_reg_class_subunion[classes[i]]
805 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
806 ADDRESS, SCRATCH)];
807 break;
809 case CT_FIXED_FORM:
810 if (constraint_satisfied_p (op, cn))
811 win = 1;
812 break;
814 break;
816 p += CONSTRAINT_LEN (c, p);
817 if (c == ',')
818 break;
821 constraints[i] = p;
823 /* How we account for this operand now depends on whether it
824 is a pseudo register or not. If it is, we first check if
825 any register classes are valid. If not, we ignore this
826 alternative, since we want to assume that all allocnos get
827 allocated for register preferencing. If some register
828 class is valid, compute the costs of moving the allocno
829 into that class. */
830 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
832 if (classes[i] == NO_REGS && ! allows_mem[i])
834 /* We must always fail if the operand is a REG, but
835 we did not find a suitable class and memory is
836 not allowed.
838 Otherwise we may perform an uninitialized read
839 from this_op_costs after the `continue' statement
840 below. */
841 alt_fail = 1;
843 else
845 unsigned int regno = REGNO (op);
846 struct costs *pp = this_op_costs[i];
847 int *pp_costs = pp->cost;
848 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
849 enum reg_class *cost_classes = cost_classes_ptr->classes;
850 bool in_p = recog_data.operand_type[i] != OP_OUT;
851 bool out_p = recog_data.operand_type[i] != OP_IN;
852 enum reg_class op_class = classes[i];
854 ira_init_register_move_cost_if_necessary (mode);
855 if (! in_p)
857 ira_assert (out_p);
858 if (op_class == NO_REGS)
860 mem_cost = ira_memory_move_cost[mode];
861 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
863 rclass = cost_classes[k];
864 pp_costs[k] = mem_cost[rclass][0] * frequency;
867 else
869 move_out_cost = ira_may_move_out_cost[mode];
870 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
872 rclass = cost_classes[k];
873 pp_costs[k]
874 = move_out_cost[op_class][rclass] * frequency;
878 else if (! out_p)
880 ira_assert (in_p);
881 if (op_class == NO_REGS)
883 mem_cost = ira_memory_move_cost[mode];
884 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
886 rclass = cost_classes[k];
887 pp_costs[k] = mem_cost[rclass][1] * frequency;
890 else
892 move_in_cost = ira_may_move_in_cost[mode];
893 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
895 rclass = cost_classes[k];
896 pp_costs[k]
897 = move_in_cost[rclass][op_class] * frequency;
901 else
903 if (op_class == NO_REGS)
905 mem_cost = ira_memory_move_cost[mode];
906 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
908 rclass = cost_classes[k];
909 pp_costs[k] = ((mem_cost[rclass][0]
910 + mem_cost[rclass][1])
911 * frequency);
914 else
916 move_in_cost = ira_may_move_in_cost[mode];
917 move_out_cost = ira_may_move_out_cost[mode];
918 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
920 rclass = cost_classes[k];
921 pp_costs[k] = ((move_in_cost[rclass][op_class]
922 + move_out_cost[op_class][rclass])
923 * frequency);
928 if (op_class == NO_REGS)
929 /* Although we don't need an insn to reload from
930 memory, accessing memory is still usually more
931 expensive than using a register. */
932 pp->mem_cost = frequency;
933 else
934 /* If the alternative actually allows memory, make
935 things a bit cheaper since we won't need an
936 extra insn to load it. */
937 pp->mem_cost
938 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
939 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
940 - allows_mem[i]) * frequency;
941 /* If we have assigned a class to this allocno in
942 our first pass, add a cost to this alternative
943 corresponding to what we would add if this
944 allocno were not in the appropriate class. */
945 if (pref)
947 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
949 if (pref_class == NO_REGS)
951 if (op_class != NO_REGS)
952 alt_cost
953 += ((out_p
954 ? ira_memory_move_cost[mode][op_class][0]
955 : 0)
956 + (in_p
957 ? ira_memory_move_cost[mode][op_class][1]
958 : 0));
960 else if (op_class == NO_REGS)
961 alt_cost
962 += ((out_p
963 ? ira_memory_move_cost[mode][pref_class][1]
964 : 0)
965 + (in_p
966 ? ira_memory_move_cost[mode][pref_class][0]
967 : 0));
968 else if (ira_reg_class_intersect[pref_class][op_class]
969 == NO_REGS)
970 alt_cost += (ira_register_move_cost
971 [mode][pref_class][op_class]);
976 /* Otherwise, if this alternative wins, either because we
977 have already determined that or if we have a hard
978 register of the proper class, there is no cost for this
979 alternative. */
980 else if (win || (REG_P (op)
981 && reg_fits_class_p (op, classes[i],
982 0, GET_MODE (op))))
985 /* If registers are valid, the cost of this alternative
986 includes copying the object to and/or from a
987 register. */
988 else if (classes[i] != NO_REGS)
990 if (recog_data.operand_type[i] != OP_OUT)
991 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
993 if (recog_data.operand_type[i] != OP_IN)
994 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
996 /* The only other way this alternative can be used is if
997 this is a constant that could be placed into memory. */
998 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
999 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1000 else
1001 alt_fail = 1;
1004 if (alt_fail)
1005 continue;
1007 op_cost_add = alt_cost * frequency;
1008 /* Finally, update the costs with the information we've
1009 calculated about this alternative. */
1010 for (i = 0; i < n_ops; i++)
1011 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1013 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1014 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1015 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1016 cost_classes_t cost_classes_ptr
1017 = regno_cost_classes[REGNO (ops[i])];
1019 pp->mem_cost = MIN (pp->mem_cost,
1020 (qq->mem_cost + op_cost_add) * scale);
1022 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1023 pp_costs[k]
1024 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1028 if (allocno_p)
1029 for (i = 0; i < n_ops; i++)
1031 ira_allocno_t a;
1032 rtx op = ops[i];
1034 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1035 continue;
1036 a = ira_curr_regno_allocno_map [REGNO (op)];
1037 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1038 ALLOCNO_BAD_SPILL_P (a) = true;
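/* A minimal sketch of the final merging step above, with the
   hypothetical name example_merge_alternative: for each candidate
   class an operand keeps the cheapest cost seen over all alternatives,
   after the alternative's own cost (already scaled by the insn
   frequency) has been added; in/out operands pass SCALE == 2 so the
   cost is counted for both the input and the output.  The mem_cost
   field is merged the same way in the real code.  */
static void
example_merge_alternative (int *acc_costs, const int *alt_costs,
                           int n_classes, int alt_cost_add, int scale)
{
  for (int k = 0; k < n_classes; k++)
    {
      int with_alt = (alt_costs[k] + alt_cost_add) * scale;
      if (with_alt < acc_costs[k])
        acc_costs[k] = with_alt;
    }
}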
1045 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1046 static inline bool
1047 ok_for_index_p_nonstrict (rtx reg)
1049 unsigned regno = REGNO (reg);
1051 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1054 /* A version of regno_ok_for_base_p for use here, when all
1055 pseudo-registers should count as OK. Arguments as for
1056 regno_ok_for_base_p. */
1057 static inline bool
1058 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1059 enum rtx_code outer_code, enum rtx_code index_code)
1061 unsigned regno = REGNO (reg);
1063 if (regno >= FIRST_PSEUDO_REGISTER)
1064 return true;
1065 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1068 /* Record the pseudo registers we must reload into hard registers in a
1069 subexpression of a memory address, X.
1071 If CONTEXT is 0, we are looking at the base part of an address,
1072 otherwise we are looking at the index part.
1074 MODE and AS are the mode and address space of the memory reference;
1075 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1076 These four arguments are passed down to base_reg_class.
1078 SCALE is twice the amount to multiply the cost by (it is twice so
1079 we can represent half-cost adjustments). */
1080 static void
1081 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1082 int context, enum rtx_code outer_code,
1083 enum rtx_code index_code, int scale)
1085 enum rtx_code code = GET_CODE (x);
1086 enum reg_class rclass;
1088 if (context == 1)
1089 rclass = INDEX_REG_CLASS;
1090 else
1091 rclass = base_reg_class (mode, as, outer_code, index_code);
1093 switch (code)
1095 case CONST_INT:
1096 case CONST:
1097 case CC0:
1098 case PC:
1099 case SYMBOL_REF:
1100 case LABEL_REF:
1101 return;
1103 case PLUS:
1104 /* When we have an address that is a sum, we must determine
1105 whether registers are "base" or "index" regs. If there is a
1106 sum of two registers, we must choose one to be the "base".
1107 Luckily, we can use the REG_POINTER to make a good choice
1108 most of the time. We only need to do this on machines that
1109 can have two registers in an address and where the base and
1110 index register classes are different.
1112 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1113 but that seems bogus since it should only be set when we are
1114 sure the register is being used as a pointer. */
1116 rtx arg0 = XEXP (x, 0);
1117 rtx arg1 = XEXP (x, 1);
1118 enum rtx_code code0 = GET_CODE (arg0);
1119 enum rtx_code code1 = GET_CODE (arg1);
1121 /* Look inside subregs. */
1122 if (code0 == SUBREG)
1123 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1124 if (code1 == SUBREG)
1125 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1127 /* If this machine only allows one register per address, it
1128 must be in the first operand. */
1129 if (MAX_REGS_PER_ADDRESS == 1)
1130 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1132 /* If index and base registers are the same on this machine,
1133 just record registers in any non-constant operands. We
1134 assume here, as well as in the tests below, that all
1135 addresses are in canonical form. */
1136 else if (INDEX_REG_CLASS
1137 == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1139 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1140 if (! CONSTANT_P (arg1))
1141 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1144 /* If the second operand is a constant integer, it doesn't
1145 change what class the first operand must be. */
1146 else if (CONST_SCALAR_INT_P (arg1))
1147 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1148 /* If the second operand is a symbolic constant, the first
1149 operand must be an index register. */
1150 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1151 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1152 /* If both operands are registers but one is already a hard
1153 register of the index or base register class, give the other
1154 operand the class that the hard register is not in. */
1155 else if (code0 == REG && code1 == REG
1156 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1157 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1158 || ok_for_index_p_nonstrict (arg0)))
1159 record_address_regs (mode, as, arg1,
1160 ok_for_base_p_nonstrict (arg0, mode, as,
1161 PLUS, REG) ? 1 : 0,
1162 PLUS, REG, scale);
1163 else if (code0 == REG && code1 == REG
1164 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1165 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1166 || ok_for_index_p_nonstrict (arg1)))
1167 record_address_regs (mode, as, arg0,
1168 ok_for_base_p_nonstrict (arg1, mode, as,
1169 PLUS, REG) ? 1 : 0,
1170 PLUS, REG, scale);
1171 /* If one operand is known to be a pointer, it must be the
1172 base with the other operand the index. Likewise if the
1173 other operand is a MULT. */
1174 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1176 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1177 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1179 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1181 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1182 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1184 /* Otherwise, count equal chances that each might be a base or
1185 index register. This case should be rare. */
1186 else
1188 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1189 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1190 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1191 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1194 break;
1196 /* Double the importance of an allocno that is incremented or
1197 decremented, since it would take two extra insns if it ends
1198 up in the wrong place. */
1199 case POST_MODIFY:
1200 case PRE_MODIFY:
1201 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1202 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1203 if (REG_P (XEXP (XEXP (x, 1), 1)))
1204 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1205 2 * scale);
1206 break;
1208 case POST_INC:
1209 case PRE_INC:
1210 case POST_DEC:
1211 case PRE_DEC:
1212 /* Double the importance of an allocno that is incremented or
1213 decremented, since it would take two extra insns if it ends
1214 up in the wrong place. */
1215 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1216 break;
1218 case REG:
1220 struct costs *pp;
1221 int *pp_costs;
1222 enum reg_class i;
1223 int k, regno, add_cost;
1224 cost_classes_t cost_classes_ptr;
1225 enum reg_class *cost_classes;
1226 move_table *move_in_cost;
1228 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1229 break;
1231 regno = REGNO (x);
1232 if (allocno_p)
1233 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1234 pp = COSTS (costs, COST_INDEX (regno));
1235 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1236 if (INT_MAX - add_cost < pp->mem_cost)
1237 pp->mem_cost = INT_MAX;
1238 else
1239 pp->mem_cost += add_cost;
1240 cost_classes_ptr = regno_cost_classes[regno];
1241 cost_classes = cost_classes_ptr->classes;
1242 pp_costs = pp->cost;
1243 ira_init_register_move_cost_if_necessary (Pmode);
1244 move_in_cost = ira_may_move_in_cost[Pmode];
1245 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1247 i = cost_classes[k];
1248 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1249 if (INT_MAX - add_cost < pp_costs[k])
1250 pp_costs[k] = INT_MAX;
1251 else
1252 pp_costs[k] += add_cost;
1255 break;
1257 default:
1259 const char *fmt = GET_RTX_FORMAT (code);
1260 int i;
1261 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1262 if (fmt[i] == 'e')
1263 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1264 scale);
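/* A minimal sketch of the SCALE convention used above, with the
   hypothetical name example_addr_reg_weight: callers pass twice the
   real multiplier (e.g. frequency * 2), and the REG case divides by 2
   at the point of use, so the ambiguous base-or-index case can hand
   SCALE / 2 to each possibility and still charge half the weight per
   guess.  */
static int
example_addr_reg_weight (int base_cost, int scale)
{
  /* record_address_regs adds (cost * scale) / 2, so scale == 2 charges
     the full cost and scale == 1 charges half of it.  */
  return (base_cost * scale) / 2;
}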
1271 /* Calculate the costs of insn operands. */
1272 static void
1273 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1275 const char *constraints[MAX_RECOG_OPERANDS];
1276 machine_mode modes[MAX_RECOG_OPERANDS];
1277 rtx ops[MAX_RECOG_OPERANDS];
1278 rtx set;
1279 int i;
1281 for (i = 0; i < recog_data.n_operands; i++)
1283 constraints[i] = recog_data.constraints[i];
1284 modes[i] = recog_data.operand_mode[i];
1287 /* If we get here, we are set up to record the costs of all the
1288 operands for this insn. Start by initializing the costs. Then
1289 handle any address registers. Finally record the desired classes
1290 for any allocnos, doing it twice if some pair of operands are
1291 commutative. */
1292 for (i = 0; i < recog_data.n_operands; i++)
1294 memcpy (op_costs[i], init_cost, struct_costs_size);
1296 ops[i] = recog_data.operand[i];
1297 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1298 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1300 if (MEM_P (recog_data.operand[i]))
1301 record_address_regs (GET_MODE (recog_data.operand[i]),
1302 MEM_ADDR_SPACE (recog_data.operand[i]),
1303 XEXP (recog_data.operand[i], 0),
1304 0, MEM, SCRATCH, frequency * 2);
1305 else if (constraints[i][0] == 'p'
1306 || (insn_extra_address_constraint
1307 (lookup_constraint (constraints[i]))))
1308 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1309 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1310 frequency * 2);
1313 /* Check for commutative operands in a separate loop so everything will have
1314 been initialized. We must do this even if one operand is a
1315 constant--see addsi3 in m68k.md. */
1316 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1317 if (constraints[i][0] == '%')
1319 const char *xconstraints[MAX_RECOG_OPERANDS];
1320 int j;
1322 /* Handle commutative operands by swapping the constraints.
1323 We assume the modes are the same. */
1324 for (j = 0; j < recog_data.n_operands; j++)
1325 xconstraints[j] = constraints[j];
1327 xconstraints[i] = constraints[i+1];
1328 xconstraints[i+1] = constraints[i];
1329 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1330 recog_data.operand, modes,
1331 xconstraints, insn, pref);
1333 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1334 recog_data.operand, modes,
1335 constraints, insn, pref);
1337 /* If this insn is a single set copying operand 1 to operand 0 and
1338 one operand is an allocno with the other a hard reg or an allocno
1339 that prefers a hard register that is in its own register class
1340 then we may want to adjust the cost of that register class to -1.
1342 Avoid the adjustment if the source does not die to avoid
1343 stressing the register allocator by preferencing two colliding
1344 registers into a single class.
1346 Also avoid the adjustment if a copy between hard registers of the
1347 class is expensive (ten times the cost of a default copy is
1348 considered arbitrarily expensive). This avoids losing when the
1349 preferred class is very expensive as the source of a copy
1350 instruction. */
1351 if ((set = single_set (insn)) != NULL_RTX
1352 /* In rare cases the single set insn might have fewer than 2 operands,
1353 as the source can be a fixed special reg. */
1354 && recog_data.n_operands > 1
1355 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1357 int regno, other_regno;
1358 rtx dest = SET_DEST (set);
1359 rtx src = SET_SRC (set);
1361 if (GET_CODE (dest) == SUBREG
1362 && (GET_MODE_SIZE (GET_MODE (dest))
1363 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1364 dest = SUBREG_REG (dest);
1365 if (GET_CODE (src) == SUBREG
1366 && (GET_MODE_SIZE (GET_MODE (src))
1367 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1368 src = SUBREG_REG (src);
1369 if (REG_P (src) && REG_P (dest)
1370 && find_regno_note (insn, REG_DEAD, REGNO (src))
1371 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1372 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1373 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1374 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1376 machine_mode mode = GET_MODE (src);
1377 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1378 enum reg_class *cost_classes = cost_classes_ptr->classes;
1379 reg_class_t rclass;
1380 int k, nr;
1382 i = regno == (int) REGNO (src) ? 1 : 0;
1383 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1385 rclass = cost_classes[k];
1386 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1387 && (reg_class_size[(int) rclass]
1388 == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1390 if (reg_class_size[rclass] == 1)
1391 op_costs[i]->cost[k] = -frequency;
1392 else
1394 for (nr = 0;
1395 nr < hard_regno_nregs[other_regno][mode];
1396 nr++)
1397 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
1398 other_regno + nr))
1399 break;
1401 if (nr == hard_regno_nregs[other_regno][mode])
1402 op_costs[i]->cost[k] = -frequency;
1412 /* Process one insn INSN. Scan it and record each time it would save
1413 code to put certain allocnos in a certain class. Return the last
1414 insn processed, so that the scan can be continued from there. */
1415 static rtx_insn *
1416 scan_one_insn (rtx_insn *insn)
1418 enum rtx_code pat_code;
1419 rtx set, note;
1420 int i, k;
1421 bool counted_mem;
1423 if (!NONDEBUG_INSN_P (insn))
1424 return insn;
1426 pat_code = GET_CODE (PATTERN (insn));
1427 if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT)
1428 return insn;
1430 counted_mem = false;
1431 set = single_set (insn);
1432 extract_insn (insn);
1434 /* If this insn loads a parameter from its stack slot, then it
1435 represents a savings, rather than a cost, if the parameter is
1436 stored in memory. Record this fact.
1438 Similarly if we're loading other constants from memory (constant
1439 pool, TOC references, small data areas, etc) and this is the only
1440 assignment to the destination pseudo.
1442 Don't do this if SET_SRC (set) isn't a general operand: if it is
1443 a memory location requiring special instructions to load, decreasing
1444 mem_cost might result in it being loaded into a register using the
1445 specialized instruction, then stored to the stack and loaded
1446 again from the stack. See PR52208.
1448 Don't do this if SET_SRC (set) has side effect. See PR56124. */
1449 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1450 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1451 && ((MEM_P (XEXP (note, 0))
1452 && !side_effects_p (SET_SRC (set)))
1453 || (CONSTANT_P (XEXP (note, 0))
1454 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1455 XEXP (note, 0))
1456 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1457 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set))))
1459 enum reg_class cl = GENERAL_REGS;
1460 rtx reg = SET_DEST (set);
1461 int num = COST_INDEX (REGNO (reg));
1463 COSTS (costs, num)->mem_cost
1464 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1465 record_address_regs (GET_MODE (SET_SRC (set)),
1466 MEM_ADDR_SPACE (SET_SRC (set)),
1467 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1468 frequency * 2);
1469 counted_mem = true;
1472 record_operand_costs (insn, pref);
1474 /* Now add the cost for each operand to the total costs for its
1475 allocno. */
1476 for (i = 0; i < recog_data.n_operands; i++)
1477 if (REG_P (recog_data.operand[i])
1478 && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1480 int regno = REGNO (recog_data.operand[i]);
1481 struct costs *p = COSTS (costs, COST_INDEX (regno));
1482 struct costs *q = op_costs[i];
1483 int *p_costs = p->cost, *q_costs = q->cost;
1484 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1485 int add_cost;
1487 /* If we already accounted for the memory "cost" above, don't
1488 do so again. */
1489 if (!counted_mem)
1491 add_cost = q->mem_cost;
1492 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1493 p->mem_cost = INT_MAX;
1494 else
1495 p->mem_cost += add_cost;
1497 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1499 add_cost = q_costs[k];
1500 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1501 p_costs[k] = INT_MAX;
1502 else
1503 p_costs[k] += add_cost;
1507 return insn;
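/* A minimal sketch of the saturating accumulation used above and in
   find_costs_and_classes, with the hypothetical name example_add_cost:
   positive increments clamp at INT_MAX instead of wrapping, so a
   pseudo referenced in very hot code cannot overflow into a negative,
   and therefore apparently attractive, cost.  */
static int
example_add_cost (int acc, int add)
{
  if (add > 0 && INT_MAX - add < acc)
    return INT_MAX;
  return acc + add;
}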
1512 /* Print allocno costs to file F. */
1513 static void
1514 print_allocno_costs (FILE *f)
1516 int k;
1517 ira_allocno_t a;
1518 ira_allocno_iterator ai;
1520 ira_assert (allocno_p);
1521 fprintf (f, "\n");
1522 FOR_EACH_ALLOCNO (a, ai)
1524 int i, rclass;
1525 basic_block bb;
1526 int regno = ALLOCNO_REGNO (a);
1527 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1528 enum reg_class *cost_classes = cost_classes_ptr->classes;
1530 i = ALLOCNO_NUM (a);
1531 fprintf (f, " a%d(r%d,", i, regno);
1532 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1533 fprintf (f, "b%d", bb->index);
1534 else
1535 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1536 fprintf (f, ") costs:");
1537 for (k = 0; k < cost_classes_ptr->num; k++)
1539 rclass = cost_classes[k];
1540 fprintf (f, " %s:%d", reg_class_names[rclass],
1541 COSTS (costs, i)->cost[k]);
1542 if (flag_ira_region == IRA_REGION_ALL
1543 || flag_ira_region == IRA_REGION_MIXED)
1544 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1546 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1547 if (flag_ira_region == IRA_REGION_ALL
1548 || flag_ira_region == IRA_REGION_MIXED)
1549 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1550 fprintf (f, "\n");
1554 /* Print pseudo costs to file F. */
1555 static void
1556 print_pseudo_costs (FILE *f)
1558 int regno, k;
1559 int rclass;
1560 cost_classes_t cost_classes_ptr;
1561 enum reg_class *cost_classes;
1563 ira_assert (! allocno_p);
1564 fprintf (f, "\n");
1565 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1567 if (REG_N_REFS (regno) <= 0)
1568 continue;
1569 cost_classes_ptr = regno_cost_classes[regno];
1570 cost_classes = cost_classes_ptr->classes;
1571 fprintf (f, " r%d costs:", regno);
1572 for (k = 0; k < cost_classes_ptr->num; k++)
1574 rclass = cost_classes[k];
1575 fprintf (f, " %s:%d", reg_class_names[rclass],
1576 COSTS (costs, regno)->cost[k]);
1578 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1582 /* Process the insns of basic block BB to update the allocno
1583 costs. */
1584 static void
1585 process_bb_for_costs (basic_block bb)
1587 rtx_insn *insn;
1589 frequency = REG_FREQ_FROM_BB (bb);
1590 if (frequency == 0)
1591 frequency = 1;
1592 FOR_BB_INSNS (bb, insn)
1593 insn = scan_one_insn (insn);
1596 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1597 costs. */
1598 static void
1599 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1601 basic_block bb;
1603 bb = loop_tree_node->bb;
1604 if (bb != NULL)
1605 process_bb_for_costs (bb);
1608 /* Find costs of register classes and memory for allocnos or pseudos
1609 and their best costs. Set up preferred, alternative and allocno
1610 classes for pseudos. */
1611 static void
1612 find_costs_and_classes (FILE *dump_file)
1614 int i, k, start, max_cost_classes_num;
1615 int pass;
1616 basic_block bb;
1617 enum reg_class *regno_best_class, new_class;
1619 init_recog ();
1620 regno_best_class
1621 = (enum reg_class *) ira_allocate (max_reg_num ()
1622 * sizeof (enum reg_class));
1623 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1624 regno_best_class[i] = NO_REGS;
1625 if (!resize_reg_info () && allocno_p
1626 && pseudo_classes_defined_p && flag_expensive_optimizations)
1628 ira_allocno_t a;
1629 ira_allocno_iterator ai;
1631 pref = pref_buffer;
1632 max_cost_classes_num = 1;
1633 FOR_EACH_ALLOCNO (a, ai)
1635 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1636 setup_regno_cost_classes_by_aclass
1637 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1638 max_cost_classes_num
1639 = MAX (max_cost_classes_num,
1640 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1642 start = 1;
1644 else
1646 pref = NULL;
1647 max_cost_classes_num = ira_important_classes_num;
1648 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1649 if (regno_reg_rtx[i] != NULL_RTX)
1650 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1651 else
1652 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1653 start = 0;
1655 if (allocno_p)
1656 /* Clear the flag for the next compiled function. */
1657 pseudo_classes_defined_p = false;
1658 /* Normally we scan the insns once and determine the best class to
1659 use for each allocno. However, if -fexpensive-optimizations is
1660 enabled, we do so twice, the second time using the tentative best
1661 classes to guide the selection. */
1662 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1664 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1665 fprintf (dump_file,
1666 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1668 if (pass != start)
1670 max_cost_classes_num = 1;
1671 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1673 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1674 max_cost_classes_num
1675 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1679 struct_costs_size
1680 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1681 /* Zero out our accumulation of the cost of each class for each
1682 allocno. */
1683 memset (costs, 0, cost_elements_num * struct_costs_size);
1685 if (allocno_p)
1687 /* Scan the instructions and record each time it would save code
1688 to put a certain allocno in a certain class. */
1689 ira_traverse_loop_tree (true, ira_loop_tree_root,
1690 process_bb_node_for_costs, NULL);
1692 memcpy (total_allocno_costs, costs,
1693 max_struct_costs_size * ira_allocnos_num);
1695 else
1697 basic_block bb;
1699 FOR_EACH_BB_FN (bb, cfun)
1700 process_bb_for_costs (bb);
1703 if (pass == 0)
1704 pref = pref_buffer;
1706 /* Now for each allocno look at how desirable each class is and
1707 find which class is preferred. */
1708 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1710 ira_allocno_t a, parent_a;
1711 int rclass, a_num, parent_a_num, add_cost;
1712 ira_loop_tree_node_t parent;
1713 int best_cost, allocno_cost;
1714 enum reg_class best, alt_class;
1715 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1716 enum reg_class *cost_classes = cost_classes_ptr->classes;
1717 int *i_costs = temp_costs->cost;
1718 int i_mem_cost;
1719 int equiv_savings = regno_equiv_gains[i];
1721 if (! allocno_p)
1723 if (regno_reg_rtx[i] == NULL_RTX)
1724 continue;
1725 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1726 i_mem_cost = temp_costs->mem_cost;
1728 else
1730 if (ira_regno_allocno_map[i] == NULL)
1731 continue;
1732 memset (temp_costs, 0, struct_costs_size);
1733 i_mem_cost = 0;
1734 /* Find cost of all allocnos with the same regno. */
1735 for (a = ira_regno_allocno_map[i];
1736 a != NULL;
1737 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1739 int *a_costs, *p_costs;
1741 a_num = ALLOCNO_NUM (a);
1742 if ((flag_ira_region == IRA_REGION_ALL
1743 || flag_ira_region == IRA_REGION_MIXED)
1744 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1745 && (parent_a = parent->regno_allocno_map[i]) != NULL
1746 /* There are no caps yet. */
1747 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1748 (a)->border_allocnos,
1749 ALLOCNO_NUM (a)))
1751 /* Propagate costs to upper levels in the region
1752 tree. */
1753 parent_a_num = ALLOCNO_NUM (parent_a);
1754 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1755 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1756 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1758 add_cost = a_costs[k];
1759 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1760 p_costs[k] = INT_MAX;
1761 else
1762 p_costs[k] += add_cost;
1764 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1765 if (add_cost > 0
1766 && (INT_MAX - add_cost
1767 < COSTS (total_allocno_costs,
1768 parent_a_num)->mem_cost))
1769 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1770 = INT_MAX;
1771 else
1772 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1773 += add_cost;
1775 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1776 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1778 a_costs = COSTS (costs, a_num)->cost;
1779 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1781 add_cost = a_costs[k];
1782 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1783 i_costs[k] = INT_MAX;
1784 else
1785 i_costs[k] += add_cost;
1787 add_cost = COSTS (costs, a_num)->mem_cost;
1788 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1789 i_mem_cost = INT_MAX;
1790 else
1791 i_mem_cost += add_cost;
1794 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1795 i_mem_cost = 0;
1796 else if (equiv_savings < 0)
1797 i_mem_cost = -equiv_savings;
1798 else if (equiv_savings > 0)
1800 i_mem_cost = 0;
1801 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1802 i_costs[k] += equiv_savings;
1805 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1806 best = ALL_REGS;
1807 alt_class = NO_REGS;
1808 /* Find best common class for all allocnos with the same
1809 regno. */
1810 for (k = 0; k < cost_classes_ptr->num; k++)
1812 rclass = cost_classes[k];
1813 if (i_costs[k] < best_cost)
1815 best_cost = i_costs[k];
1816 best = (enum reg_class) rclass;
1818 else if (i_costs[k] == best_cost)
1819 best = ira_reg_class_subunion[best][rclass];
1820 if (pass == flag_expensive_optimizations
1821 /* We still prefer registers to memory even at this
1822 stage if their costs are the same. We will make
1823 a final decision during assigning hard registers
1824 when we have all info including more accurate
1825 costs which might be affected by assigning hard
1826 registers to other pseudos because the pseudos
1827 involved in moves can be coalesced. */
1828 && i_costs[k] <= i_mem_cost
1829 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1830 > reg_class_size[alt_class]))
1831 alt_class = reg_class_subunion[alt_class][rclass];
1833 alt_class = ira_allocno_class_translate[alt_class];
1834 if (best_cost > i_mem_cost
1835 && ! non_spilled_static_chain_regno_p (i))
1836 regno_aclass[i] = NO_REGS;
1837 else if (!optimize && !targetm.class_likely_spilled_p (best))
1838 /* Registers in the alternative class are likely to need
1839 longer or slower sequences than registers in the best class.
1840 When optimizing we make some effort to use the best class
1841 over the alternative class where possible, but at -O0 we
1842 effectively give the alternative class equal weight.
1843 We then run the risk of using slower alternative registers
1844 when plenty of registers from the best class are still free.
1845 This is especially true because live ranges tend to be very
1846 short in -O0 code and so register pressure tends to be low.
1848 Avoid that by ignoring the alternative class if the best
1849 class has plenty of registers. */
1850 regno_aclass[i] = best;
1851 else
1853 /* Make the common class the biggest class of best and
1854 alt_class. */
1855 regno_aclass[i]
1856 = ira_reg_class_superunion[best][alt_class];
1857 ira_assert (regno_aclass[i] != NO_REGS
1858 && ira_reg_allocno_class_p[regno_aclass[i]]);
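/* Give the target a chance to change the chosen allocno class; if
   the hook returns a different class, BEST and ALT_CLASS are also
   replaced by the new class when it is contained in them.  */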
1860 if ((new_class
1861 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1862 (i, regno_aclass[i], best))) != regno_aclass[i])
1864 regno_aclass[i] = new_class;
1865 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1866 reg_class_contents[best]))
1867 best = new_class;
1868 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1869 reg_class_contents[alt_class]))
1870 alt_class = new_class;
1872 if (pass == flag_expensive_optimizations)
1874 if (best_cost > i_mem_cost
1875 /* Do not assign NO_REGS to static chain pointer
1876 pseudo when non-local goto is used. */
1877 && ! non_spilled_static_chain_regno_p (i))
1878 best = alt_class = NO_REGS;
1879 else if (best == alt_class)
1880 alt_class = NO_REGS;
1881 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1882 if ((!allocno_p || internal_flag_ira_verbose > 2)
1883 && dump_file != NULL)
1884 fprintf (dump_file,
1885 " r%d: preferred %s, alternative %s, allocno %s\n",
1886 i, reg_class_names[best], reg_class_names[alt_class],
1887 reg_class_names[regno_aclass[i]]);
1889 regno_best_class[i] = best;
1890 if (! allocno_p)
1892 pref[i] = (best_cost > i_mem_cost
1893 && ! non_spilled_static_chain_regno_p (i)
1894 ? NO_REGS : best);
1895 continue;
1897 for (a = ira_regno_allocno_map[i];
1898 a != NULL;
1899 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1901 enum reg_class aclass = regno_aclass[i];
1902 int a_num = ALLOCNO_NUM (a);
1903 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1904 int *a_costs = COSTS (costs, a_num)->cost;
1906 if (aclass == NO_REGS)
1907 best = NO_REGS;
1908 else
1910 /* Find the best class which is a subset of the common
1911 class. */
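/* The search below is driven by the costs propagated from inner
   regions (total_allocno_costs), while the cost finally recorded in
   ALLOCNO_CLASS_COST comes from this allocno's own cost vector.  */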
1912 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1913 allocno_cost = best_cost;
1914 best = ALL_REGS;
1915 for (k = 0; k < cost_classes_ptr->num; k++)
1917 rclass = cost_classes[k];
1918 if (! ira_class_subset_p[rclass][aclass])
1919 continue;
1920 if (total_a_costs[k] < best_cost)
1922 best_cost = total_a_costs[k];
1923 allocno_cost = a_costs[k];
1924 best = (enum reg_class) rclass;
1926 else if (total_a_costs[k] == best_cost)
1928 best = ira_reg_class_subunion[best][rclass];
1929 allocno_cost = MAX (allocno_cost, a_costs[k]);
1932 ALLOCNO_CLASS_COST (a) = allocno_cost;
1934 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1935 && (pass == 0 || pref[a_num] != best))
1937 fprintf (dump_file, " a%d (r%d,", a_num, i);
1938 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1939 fprintf (dump_file, "b%d", bb->index);
1940 else
1941 fprintf (dump_file, "l%d",
1942 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1943 fprintf (dump_file, ") best %s, allocno %s\n",
1944 reg_class_names[best],
1945 reg_class_names[aclass]);
1947 pref[a_num] = best;
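/* If the best class has no more hard registers than a single value
   of this mode needs, there is essentially only one place to put the
   allocno: record a preference for the first hard register of that
   class and equalize the costs of BEST's subclasses with the allocno
   class's cost.  */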
1948 if (pass == flag_expensive_optimizations && best != aclass
1949 && ira_class_hard_regs_num[best] > 0
1950 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1951 >= ira_class_hard_regs_num[best]))
1953 int ind = cost_classes_ptr->index[aclass];
1955 ira_assert (ind >= 0);
1956 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1957 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1958 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
1959 / (ira_register_move_cost
1960 [ALLOCNO_MODE (a)][best][aclass]));
1961 for (k = 0; k < cost_classes_ptr->num; k++)
1962 if (ira_class_subset_p[cost_classes[k]][best])
1963 a_costs[k] = a_costs[ind];
1968 if (internal_flag_ira_verbose > 4 && dump_file)
1970 if (allocno_p)
1971 print_allocno_costs (dump_file);
1972 else
1973 print_pseudo_costs (dump_file);
1974 fprintf (dump_file,"\n");
1977 ira_free (regno_best_class);
1982 /* Process moves involving hard regs to modify allocno hard register
1983 costs. We can do this only after determining the allocno class. If a
1984 hard register forms a register class by itself, then moves involving
1985 that hard register are already taken into account in the class costs
1986 for the allocno. */
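/* For example, for a move insn (set (reg:SI 100) (reg:SI 1)) in a
   block with execution frequency FREQ, a preference for hard register
   1 weighted by FREQ is recorded for pseudo 100's allocnos up the
   region tree, and the hard register and conflict costs of register 1
   for the innermost allocno are decreased by FREQ times the
   corresponding register move cost.  This is illustrative only; the
   actual numbers depend on the target's cost tables.  */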
1987 static void
1988 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
1990 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
1991 bool to_p;
1992 ira_allocno_t a, curr_a;
1993 ira_loop_tree_node_t curr_loop_tree_node;
1994 enum reg_class rclass;
1995 basic_block bb;
1996 rtx_insn *insn;
1997 rtx set, src, dst;
1999 bb = loop_tree_node->bb;
2000 if (bb == NULL)
2001 return;
2002 freq = REG_FREQ_FROM_BB (bb);
2003 if (freq == 0)
2004 freq = 1;
2005 FOR_BB_INSNS (bb, insn)
2007 if (!NONDEBUG_INSN_P (insn))
2008 continue;
2009 set = single_set (insn);
2010 if (set == NULL_RTX)
2011 continue;
2012 dst = SET_DEST (set);
2013 src = SET_SRC (set);
2014 if (! REG_P (dst) || ! REG_P (src))
2015 continue;
2016 dst_regno = REGNO (dst);
2017 src_regno = REGNO (src);
2018 if (dst_regno >= FIRST_PSEUDO_REGISTER
2019 && src_regno < FIRST_PSEUDO_REGISTER)
2021 hard_regno = src_regno;
2022 a = ira_curr_regno_allocno_map[dst_regno];
2023 to_p = true;
2025 else if (src_regno >= FIRST_PSEUDO_REGISTER
2026 && dst_regno < FIRST_PSEUDO_REGISTER)
2028 hard_regno = dst_regno;
2029 a = ira_curr_regno_allocno_map[src_regno];
2030 to_p = false;
2032 else
2033 continue;
2034 rclass = ALLOCNO_CLASS (a);
2035 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2036 continue;
2037 i = ira_class_hard_reg_index[rclass][hard_regno];
2038 if (i < 0)
2039 continue;
2040 a_regno = ALLOCNO_REGNO (a);
2041 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2042 curr_loop_tree_node != NULL;
2043 curr_loop_tree_node = curr_loop_tree_node->parent)
2044 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2045 ira_add_allocno_pref (curr_a, hard_regno, freq);
2047 int cost;
2048 enum reg_class hard_reg_class;
2049 machine_mode mode;
2051 mode = ALLOCNO_MODE (a);
2052 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2053 ira_init_register_move_cost_if_necessary (mode);
2054 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2055 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2056 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2057 ALLOCNO_CLASS_COST (a));
2058 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2059 rclass, 0);
2060 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2061 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2062 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2063 ALLOCNO_HARD_REG_COSTS (a)[i]);
2068 /* After we have found the hard register and memory costs for the
2069 allocnos, define their classes and modify the hard register costs to
2070 account for insns that move an allocno to/from hard registers. */
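/* In particular, when optimizing and the preferred class of a pseudo
   differs from its allocno class, a full hard register cost vector is
   built: hard registers inside the preferred class keep
   ALLOCNO_CLASS_COST, while the remaining registers get the cost
   recorded for their own register class.  */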
2071 static void
2072 setup_allocno_class_and_costs (void)
2074 int i, j, n, regno, hard_regno, num;
2075 int *reg_costs;
2076 enum reg_class aclass, rclass;
2077 ira_allocno_t a;
2078 ira_allocno_iterator ai;
2079 cost_classes_t cost_classes_ptr;
2081 ira_assert (allocno_p);
2082 FOR_EACH_ALLOCNO (a, ai)
2084 i = ALLOCNO_NUM (a);
2085 regno = ALLOCNO_REGNO (a);
2086 aclass = regno_aclass[regno];
2087 cost_classes_ptr = regno_cost_classes[regno];
2088 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2089 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2090 ira_set_allocno_class (a, aclass);
2091 if (aclass == NO_REGS)
2092 continue;
2093 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2095 n = ira_class_hard_regs_num[aclass];
2096 ALLOCNO_HARD_REG_COSTS (a)
2097 = reg_costs = ira_allocate_cost_vector (aclass);
2098 for (j = n - 1; j >= 0; j--)
2100 hard_regno = ira_class_hard_regs[aclass][j];
2101 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2102 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2103 else
2105 rclass = REGNO_REG_CLASS (hard_regno);
2106 num = cost_classes_ptr->index[rclass];
2107 if (num < 0)
2109 num = cost_classes_ptr->hard_regno_index[hard_regno];
2110 ira_assert (num >= 0);
2112 reg_costs[j] = COSTS (costs, i)->cost[num];
2117 if (optimize)
2118 ira_traverse_loop_tree (true, ira_loop_tree_root,
2119 process_bb_node_for_hard_reg_moves, NULL);
2124 /* This function is called once per compiler run. */
2125 void
2126 ira_init_costs_once (void)
2128 int i;
2130 init_cost = NULL;
2131 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2133 op_costs[i] = NULL;
2134 this_op_costs[i] = NULL;
2136 temp_costs = NULL;
2139 /* Free allocated temporary cost vectors. */
2140 void
2141 target_ira_int::free_ira_costs ()
2143 int i;
2145 free (x_init_cost);
2146 x_init_cost = NULL;
2147 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2149 free (x_op_costs[i]);
2150 free (x_this_op_costs[i]);
2151 x_op_costs[i] = x_this_op_costs[i] = NULL;
2153 free (x_temp_costs);
2154 x_temp_costs = NULL;
2157 /* This is called each time the register-related information is
2158 changed. */
2159 void
2160 ira_init_costs (void)
2162 int i;
2164 this_target_ira_int->free_ira_costs ();
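/* sizeof (struct costs) already provides room for one class cost, so
   only ira_important_classes_num - 1 additional ints are needed for
   the trailing cost array.  */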
2165 max_struct_costs_size
2166 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2167 /* Don't use ira_allocate because vectors live through several IRA
2168 calls. */
2169 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2170 init_cost->mem_cost = 1000000;
2171 for (i = 0; i < ira_important_classes_num; i++)
2172 init_cost->cost[i] = 1000000;
2173 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2175 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2176 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2178 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2183 /* Common initialization function for ira_costs and
2184 ira_set_pseudo_classes. */
2185 static void
2186 init_costs (void)
2188 init_subregs_of_mode ();
2189 costs = (struct costs *) ira_allocate (max_struct_costs_size
2190 * cost_elements_num);
2191 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2192 * cost_elements_num);
2193 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2194 * max_reg_num ());
2195 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2196 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2199 /* Common finalization function for ira_costs and
2200 ira_set_pseudo_classes. */
2201 static void
2202 finish_costs (void)
2204 finish_subregs_of_mode ();
2205 ira_free (regno_equiv_gains);
2206 ira_free (regno_aclass);
2207 ira_free (pref_buffer);
2208 ira_free (costs);
2211 /* Entry function which defines register class, memory and hard
2212 register costs for each allocno. */
2213 void
2214 ira_costs (void)
2216 allocno_p = true;
2217 cost_elements_num = ira_allocnos_num;
2218 init_costs ();
2219 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2220 * ira_allocnos_num);
2221 initiate_regno_cost_classes ();
2222 calculate_elim_costs_all_insns ();
2223 find_costs_and_classes (ira_dump_file);
2224 setup_allocno_class_and_costs ();
2225 finish_regno_cost_classes ();
2226 finish_costs ();
2227 ira_free (total_allocno_costs);
2230 /* Entry function which defines classes for pseudos.
2231 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2232 void
2233 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2235 allocno_p = false;
2236 internal_flag_ira_verbose = flag_ira_verbose;
2237 cost_elements_num = max_reg_num ();
2238 init_costs ();
2239 initiate_regno_cost_classes ();
2240 find_costs_and_classes (dump_file);
2241 finish_regno_cost_classes ();
2242 if (define_pseudo_classes)
2243 pseudo_classes_defined_p = true;
2245 finish_costs ();
2250 /* Change the hard register costs for allocnos which live through
2251 function calls. This is called only after all crossed calls have been
2252 found during the building of allocno live ranges. */
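/* For instance, a call-clobbered hard register considered for an
   allocno that crosses calls has its cost increased by roughly
   ALLOCNO_CALL_FREQ times the cost of storing and reloading the value
   around the calls, which makes call-saved registers and memory
   comparatively cheaper.  */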
2253 void
2254 ira_tune_allocno_costs (void)
2256 int j, n, regno;
2257 int cost, min_cost, *reg_costs;
2258 enum reg_class aclass, rclass;
2259 machine_mode mode;
2260 ira_allocno_t a;
2261 ira_allocno_iterator ai;
2262 ira_allocno_object_iterator oi;
2263 ira_object_t obj;
2264 bool skip_p;
2265 HARD_REG_SET *crossed_calls_clobber_regs;
2267 FOR_EACH_ALLOCNO (a, ai)
2269 aclass = ALLOCNO_CLASS (a);
2270 if (aclass == NO_REGS)
2271 continue;
2272 mode = ALLOCNO_MODE (a);
2273 n = ira_class_hard_regs_num[aclass];
2274 min_cost = INT_MAX;
2275 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2276 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2278 ira_allocate_and_set_costs
2279 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2280 ALLOCNO_CLASS_COST (a));
2281 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2282 for (j = n - 1; j >= 0; j--)
2284 regno = ira_class_hard_regs[aclass][j];
2285 skip_p = false;
2286 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2288 if (ira_hard_reg_set_intersection_p (regno, mode,
2289 OBJECT_CONFLICT_HARD_REGS
2290 (obj)))
2292 skip_p = true;
2293 break;
2296 if (skip_p)
2297 continue;
2298 rclass = REGNO_REG_CLASS (regno);
2299 cost = 0;
2300 crossed_calls_clobber_regs
2301 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2302 if (ira_hard_reg_set_intersection_p (regno, mode,
2303 *crossed_calls_clobber_regs)
2304 && (ira_hard_reg_set_intersection_p (regno, mode,
2305 call_used_reg_set)
2306 || HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
2307 cost += (ALLOCNO_CALL_FREQ (a)
2308 * (ira_memory_move_cost[mode][rclass][0]
2309 + ira_memory_move_cost[mode][rclass][1]));
2310 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2311 cost += ((ira_memory_move_cost[mode][rclass][0]
2312 + ira_memory_move_cost[mode][rclass][1])
2313 * ALLOCNO_FREQ (a)
2314 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2315 #endif
2316 if (INT_MAX - cost < reg_costs[j])
2317 reg_costs[j] = INT_MAX;
2318 else
2319 reg_costs[j] += cost;
2320 if (min_cost > reg_costs[j])
2321 min_cost = reg_costs[j];
2324 if (min_cost != INT_MAX)
2325 ALLOCNO_CLASS_COST (a) = min_cost;
2327 /* Some targets allow pseudos to be allocated to unaligned sequences
2328 of hard registers. However, selecting an unaligned sequence can
2329 unnecessarily restrict later allocations. So increase the cost of
2330 unaligned hard regs to encourage the use of aligned hard regs. */
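/* E.g. when the mode needs two hard registers, every candidate that
   starts at an odd register number has ALLOCNO_FREQ added to its
   cost, biasing the allocator towards aligned register pairs.  */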
2332 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2334 if (nregs > 1)
2336 ira_allocate_and_set_costs
2337 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2338 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2339 for (j = n - 1; j >= 0; j--)
2341 regno = ira_non_ordered_class_hard_regs[aclass][j];
2342 if ((regno % nregs) != 0)
2344 int index = ira_class_hard_reg_index[aclass][regno];
2345 ira_assert (index != -1);
2346 reg_costs[index] += ALLOCNO_FREQ (a);
2354 /* Add COST to the estimated gain for eliminating REGNO with its
2355 equivalence. If COST is zero, record that no such elimination is
2356 possible. */
2358 void
2359 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2361 if (cost == 0)
2362 regno_equiv_gains[regno] = 0;
2363 else
2364 regno_equiv_gains[regno] += cost;
2367 void
2368 ira_costs_c_finalize (void)
2370 this_target_ira_int->free_ira_costs ();