gcc/ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "flags.h"
29 #include "alias.h"
30 #include "insn-config.h"
31 #include "expmed.h"
32 #include "dojump.h"
33 #include "explow.h"
34 #include "calls.h"
35 #include "emit-rtl.h"
36 #include "varasm.h"
37 #include "stmt.h"
38 #include "expr.h"
39 #include "tm_p.h"
40 #include "regs.h"
41 #include "addresses.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "target.h"
45 #include "params.h"
46 #include "cfgloop.h"
47 #include "ira.h"
48 #include "alloc-pool.h"
49 #include "ira-int.h"
51 /* This flag is set up every time we calculate pseudo register
52 classes through the function ira_set_pseudo_classes. */
53 static bool pseudo_classes_defined_p = false;
55 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
56 static bool allocno_p;
58 /* Number of elements in array `costs'. */
59 static int cost_elements_num;
61 /* The `costs' struct records the cost of using hard registers of each
62 class considered for the calculation and of using memory for each
63 allocno or pseudo. */
64 struct costs
66 int mem_cost;
67 /* Costs for register classes start here. We process only some
68 allocno classes. */
69 int cost[1];
72 #define max_struct_costs_size \
73 (this_target_ira_int->x_max_struct_costs_size)
74 #define init_cost \
75 (this_target_ira_int->x_init_cost)
76 #define temp_costs \
77 (this_target_ira_int->x_temp_costs)
78 #define op_costs \
79 (this_target_ira_int->x_op_costs)
80 #define this_op_costs \
81 (this_target_ira_int->x_this_op_costs)
83 /* Costs of each class for each allocno or pseudo. */
84 static struct costs *costs;
86 /* Accumulated costs of each class for each allocno. */
87 static struct costs *total_allocno_costs;
89 /* The current size of struct costs. */
90 static int struct_costs_size;
92 /* Return pointer to structure containing costs of allocno or pseudo
93 with given NUM in array ARR. */
94 #define COSTS(arr, num) \
95 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
97 /* Return index in COSTS when processing reg with REGNO. */
98 #define COST_INDEX(regno) (allocno_p \
99 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
100 : (int) regno)
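/* For example, the cost entry of pseudo REGNO is reached as
     COSTS (costs, COST_INDEX (regno))->cost[k]
   where K indexes that pseudo's cost classes.  The byte arithmetic in
   COSTS is needed because struct costs is allocated with a variable
   length tail: the declared cost[1] actually holds one entry per cost
   class, and struct_costs_size below records the real element size.  */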
102 /* Record register class preferences of each allocno or pseudo. A null
103 value means no preferences have been computed yet, which happens on
104 the 1st iteration of the cost calculation. */
105 static enum reg_class *pref;
107 /* Allocated buffers for pref. */
108 static enum reg_class *pref_buffer;
110 /* Record allocno class of each allocno with the same regno. */
111 static enum reg_class *regno_aclass;
113 /* Record cost gains for not allocating a register with an invariant
114 equivalence. */
115 static int *regno_equiv_gains;
117 /* Execution frequency of the current insn. */
118 static int frequency;
122 /* Info about reg classes whose costs are calculated for a pseudo. */
123 struct cost_classes
125 /* Number of the cost classes in the subsequent array. */
126 int num;
127 /* Container of the cost classes. */
128 enum reg_class classes[N_REG_CLASSES];
129 /* Map reg class -> index of the reg class in the previous array.
130 -1 if it is not a cost class. */
131 int index[N_REG_CLASSES];
132 /* Map hard regno -> index of the first class in array CLASSES containing
133 the hard regno, -1 otherwise. */
134 int hard_regno_index[FIRST_PSEUDO_REGISTER];
137 /* Types of pointers to the structure above. */
138 typedef struct cost_classes *cost_classes_t;
139 typedef const struct cost_classes *const_cost_classes_t;
141 /* Info about cost classes for each pseudo. */
142 static cost_classes_t *regno_cost_classes;
144 /* Helper for cost_classes hashing. */
146 struct cost_classes_hasher : pointer_hash <cost_classes>
148 static inline hashval_t hash (const cost_classes *);
149 static inline bool equal (const cost_classes *, const cost_classes *);
150 static inline void remove (cost_classes *);
153 /* Returns hash value for cost classes info HV. */
154 inline hashval_t
155 cost_classes_hasher::hash (const cost_classes *hv)
157 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
160 /* Compares cost classes info HV1 and HV2. */
161 inline bool
162 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
164 return (hv1->num == hv2->num
165 && memcmp (hv1->classes, hv2->classes,
166 sizeof (enum reg_class) * hv1->num) == 0);
169 /* Delete cost classes info V from the hash table. */
170 inline void
171 cost_classes_hasher::remove (cost_classes *v)
173 ira_free (v);
176 /* Hash table of unique cost classes. */
177 static hash_table<cost_classes_hasher> *cost_classes_htab;
179 /* Map allocno class -> cost classes for pseudo of given allocno
180 class. */
181 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
183 /* Map mode -> cost classes for pseudo of given mode. */
184 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
186 /* Cost classes that include all classes in ira_important_classes. */
187 static cost_classes all_cost_classes;
189 /* Use the array of classes in CLASSES_PTR to fill out the rest of
190 the structure. */
191 static void
192 complete_cost_classes (cost_classes_t classes_ptr)
194 for (int i = 0; i < N_REG_CLASSES; i++)
195 classes_ptr->index[i] = -1;
196 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
197 classes_ptr->hard_regno_index[i] = -1;
198 for (int i = 0; i < classes_ptr->num; i++)
200 enum reg_class cl = classes_ptr->classes[i];
201 classes_ptr->index[cl] = i;
202 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
204 unsigned int hard_regno = ira_class_hard_regs[cl][j];
205 if (classes_ptr->hard_regno_index[hard_regno] < 0)
206 classes_ptr->hard_regno_index[hard_regno] = i;
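/* For example, if CLASSES_PTR->classes were the hypothetical two-class
   set { GENERAL_REGS, ALL_REGS }, this would set index[GENERAL_REGS] = 0
   and index[ALL_REGS] = 1, leave every other index[] entry at -1, and
   point hard_regno_index[R] at the first class in the array that
   contains hard register R (0 for registers in GENERAL_REGS, 1 for the
   rest of ALL_REGS, -1 for registers in neither).  */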
211 /* Initialize info about the cost classes for each pseudo. */
212 static void
213 initiate_regno_cost_classes (void)
215 int size = sizeof (cost_classes_t) * max_reg_num ();
217 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
218 memset (regno_cost_classes, 0, size);
219 memset (cost_classes_aclass_cache, 0,
220 sizeof (cost_classes_t) * N_REG_CLASSES);
221 memset (cost_classes_mode_cache, 0,
222 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
223 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
224 all_cost_classes.num = ira_important_classes_num;
225 for (int i = 0; i < ira_important_classes_num; i++)
226 all_cost_classes.classes[i] = ira_important_classes[i];
227 complete_cost_classes (&all_cost_classes);
230 /* Create new cost classes from cost classes FROM and set up members
231 index and hard_regno_index. Return the new classes. The function
232 implements some common code of two functions
233 setup_regno_cost_classes_by_aclass and
234 setup_regno_cost_classes_by_mode. */
235 static cost_classes_t
236 setup_cost_classes (cost_classes_t from)
238 cost_classes_t classes_ptr;
240 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
241 classes_ptr->num = from->num;
242 for (int i = 0; i < from->num; i++)
243 classes_ptr->classes[i] = from->classes[i];
244 complete_cost_classes (classes_ptr);
245 return classes_ptr;
248 /* Return a version of FULL that only considers registers in REGS that are
249 valid for mode MODE. Both FULL and the returned class are globally
250 allocated. */
251 static cost_classes_t
252 restrict_cost_classes (cost_classes_t full, machine_mode mode,
253 const HARD_REG_SET &regs)
255 static struct cost_classes narrow;
256 int map[N_REG_CLASSES];
257 narrow.num = 0;
258 for (int i = 0; i < full->num; i++)
260 /* Assume that we'll drop the class. */
261 map[i] = -1;
263 /* Ignore classes that are too small for the mode. */
264 enum reg_class cl = full->classes[i];
265 if (!contains_reg_of_mode[cl][mode])
266 continue;
268 /* Calculate the set of registers in CL that belong to REGS and
269 are valid for MODE. */
270 HARD_REG_SET valid_for_cl;
271 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
272 AND_HARD_REG_SET (valid_for_cl, regs);
273 AND_COMPL_HARD_REG_SET (valid_for_cl,
274 ira_prohibited_class_mode_regs[cl][mode]);
275 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
276 if (hard_reg_set_empty_p (valid_for_cl))
277 continue;
279 /* Don't use this class if the set of valid registers is a subset
280 of an existing class. For example, suppose we have two classes
281 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
282 that the mode changes allowed by FR_REGS are not as general as
283 the mode changes allowed by GR_REGS.
285 In this situation, the mode changes for GR_AND_FR_REGS could
286 either be seen as the union or the intersection of the mode
287 changes allowed by the two subclasses. The justification for
288 the union-based definition would be that, if you want a mode
289 change that's only allowed by GR_REGS, you can pick a register
290 from the GR_REGS subclass. The justification for the
291 intersection-based definition would be that every register
292 from the class would allow the mode change.
294 However, if we have a register that needs to be in GR_REGS,
295 using GR_AND_FR_REGS with the intersection-based definition
296 would be too pessimistic, since it would bring in restrictions
297 that only apply to FR_REGS. Conversely, if we have a register
298 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
299 union-based definition would lose the extra restrictions
300 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
301 for cases where GR_REGS and FR_REGS are both valid. */
302 int pos;
303 for (pos = 0; pos < narrow.num; ++pos)
305 enum reg_class cl2 = narrow.classes[pos];
306 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
307 break;
309 map[i] = pos;
310 if (pos == narrow.num)
312 /* If several classes are equivalent, prefer to use the one
313 that was chosen as the allocno class. */
314 enum reg_class cl2 = ira_allocno_class_translate[cl];
315 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
316 cl = cl2;
317 narrow.classes[narrow.num++] = cl;
320 if (narrow.num == full->num)
321 return full;
323 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
324 if (*slot == NULL)
326 cost_classes_t classes = setup_cost_classes (&narrow);
327 /* Map equivalent classes to the representative that we chose above. */
328 for (int i = 0; i < ira_important_classes_num; i++)
330 enum reg_class cl = ira_important_classes[i];
331 int index = full->index[cl];
332 if (index >= 0)
333 classes->index[cl] = map[index];
335 *slot = classes;
337 return *slot;
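/* Both callers below pass either the register set returned by
   valid_mode_changes_for_regno or reg_class_contents[ALL_REGS], so the
   narrowed class list reflects both the pseudo's mode and any
   mode-change restrictions on its subregs.  */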
340 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
341 This function is used when we already know an initial approximation of
342 the allocno class of the pseudo, e.g. on the second iteration
343 of class cost calculation or after class cost calculation in
344 register-pressure sensitive insn scheduling or register-pressure
345 sensitive loop-invariant motion. */
346 static void
347 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
349 static struct cost_classes classes;
350 cost_classes_t classes_ptr;
351 enum reg_class cl;
352 int i;
353 cost_classes **slot;
354 HARD_REG_SET temp, temp2;
355 bool exclude_p;
357 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
359 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
360 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
361 /* We exclude classes from consideration which are subsets of
362 ACLASS only if ACLASS is a uniform class. */
363 exclude_p = ira_uniform_class_p[aclass];
364 classes.num = 0;
365 for (i = 0; i < ira_important_classes_num; i++)
367 cl = ira_important_classes[i];
368 if (exclude_p)
370 /* Exclude non-uniform classes which are subsets of
371 ACLASS. */
372 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
373 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
374 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
375 continue;
377 classes.classes[classes.num++] = cl;
379 slot = cost_classes_htab->find_slot (&classes, INSERT);
380 if (*slot == NULL)
382 classes_ptr = setup_cost_classes (&classes);
383 *slot = classes_ptr;
385 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
387 if (regno_reg_rtx[regno] != NULL_RTX)
389 /* Restrict the classes to those that are valid for REGNO's mode
390 (which might for example exclude singleton classes if the mode
391 requires two registers). Also restrict the classes to those that
392 are valid for subregs of REGNO. */
393 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
394 if (!valid_regs)
395 valid_regs = &reg_class_contents[ALL_REGS];
396 classes_ptr = restrict_cost_classes (classes_ptr,
397 PSEUDO_REGNO_MODE (regno),
398 *valid_regs);
400 regno_cost_classes[regno] = classes_ptr;
403 /* Setup cost classes for pseudo REGNO with MODE. Using MODE can
404 decrease the number of cost classes for the pseudo if hard registers
405 of some important classes cannot hold a value of MODE. In that case
406 the pseudo cannot get a hard register of those classes, and
407 calculating costs for them would only waste CPU
408 time. */
409 static void
410 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
412 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
413 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
414 mode, *valid_regs);
415 else
417 if (cost_classes_mode_cache[mode] == NULL)
418 cost_classes_mode_cache[mode]
419 = restrict_cost_classes (&all_cost_classes, mode,
420 reg_class_contents[ALL_REGS]);
421 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
425 /* Finalize info about the cost classes for each pseudo. */
426 static void
427 finish_regno_cost_classes (void)
429 ira_free (regno_cost_classes);
430 delete cost_classes_htab;
431 cost_classes_htab = NULL;
436 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
437 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
438 be a pseudo register. */
439 static int
440 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
441 secondary_reload_info *prev_sri)
443 secondary_reload_info sri;
444 reg_class_t secondary_class = NO_REGS;
446 /* If X is a SCRATCH, there is actually nothing to move since we are
447 assuming optimal allocation. */
448 if (GET_CODE (x) == SCRATCH)
449 return 0;
451 /* Get the class we will actually use for a reload. */
452 rclass = targetm.preferred_reload_class (x, rclass);
454 /* If we need a secondary reload for an intermediate, the cost is
455 that to load the input into the intermediate register, then to
456 copy it. */
457 sri.prev_sri = prev_sri;
458 sri.extra_cost = 0;
459 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
461 if (secondary_class != NO_REGS)
463 ira_init_register_move_cost_if_necessary (mode);
464 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
465 + sri.extra_cost
466 + copy_cost (x, mode, secondary_class, to_p, &sri));
469 /* For memory, use the memory move cost, for (hard) registers, use
470 the cost to move between the register classes, and use the cost
471 of a single insn for everything else (constants). */
472 if (MEM_P (x) || rclass == NO_REGS)
473 return sri.extra_cost
474 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
475 else if (REG_P (x))
477 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
479 ira_init_register_move_cost_if_necessary (mode);
480 return (sri.extra_cost
481 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
483 else
484 /* If this is a constant, we may eventually want to call rtx_cost
485 here. */
486 return sri.extra_cost + COSTS_N_INSNS (1);
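/* As a worked example (with made-up costs): if moving X into RCLASS
   needs an intermediate register of SECONDARY_CLASS, and the
   secondary -> RCLASS move costs 2 while loading X into
   SECONDARY_CLASS costs 4, the recursion above yields 2 + 4, plus any
   sri.extra_cost contributions, rather than a single move cost.  */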
491 /* Record the cost of using memory or hard registers of various
492 classes for the operands in INSN.
494 N_ALTS is the number of alternatives.
495 N_OPS is the number of operands.
496 OPS is an array of the operands.
497 MODES are the modes of the operands, in case any are VOIDmode.
498 CONSTRAINTS are the constraints to use for the operands. This array
499 is modified by this procedure.
501 This procedure works alternative by alternative. For each
502 alternative we assume that we will be able to allocate all allocnos
503 to their ideal register class and calculate the cost of using that
504 alternative. Then we compute, for each operand that is a
505 pseudo-register, the cost of having the allocno allocated to each
506 register class and using it in that alternative. To this cost is
507 added the cost of the alternative.
509 The cost of each class for this insn is its lowest cost among all
510 the alternatives. */
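/* Concretely, for each pseudo operand the loops below fill
   this_op_costs[i] for one alternative, and the final update at the
   end of the function computes
     op_costs[i]->cost[k]
       = MIN over alternatives of (this_op_costs[i]->cost[k]
                                   + alt_cost * frequency) * scale
   where scale is 2 for in/out (OP_INOUT) operands and 1 otherwise.  */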
511 static void
512 record_reg_classes (int n_alts, int n_ops, rtx *ops,
513 machine_mode *modes, const char **constraints,
514 rtx_insn *insn, enum reg_class *pref)
516 int alt;
517 int i, j, k;
518 int insn_allows_mem[MAX_RECOG_OPERANDS];
519 move_table *move_in_cost, *move_out_cost;
520 short (*mem_cost)[2];
522 for (i = 0; i < n_ops; i++)
523 insn_allows_mem[i] = 0;
525 /* Process each alternative, each time minimizing an operand's cost
526 with the cost for each operand in that alternative. */
527 alternative_mask preferred = get_preferred_alternatives (insn);
528 for (alt = 0; alt < n_alts; alt++)
530 enum reg_class classes[MAX_RECOG_OPERANDS];
531 int allows_mem[MAX_RECOG_OPERANDS];
532 enum reg_class rclass;
533 int alt_fail = 0;
534 int alt_cost = 0, op_cost_add;
536 if (!TEST_BIT (preferred, alt))
538 for (i = 0; i < recog_data.n_operands; i++)
539 constraints[i] = skip_alternative (constraints[i]);
541 continue;
544 for (i = 0; i < n_ops; i++)
546 unsigned char c;
547 const char *p = constraints[i];
548 rtx op = ops[i];
549 machine_mode mode = modes[i];
550 int allows_addr = 0;
551 int win = 0;
553 /* Initially show we know nothing about the register class. */
554 classes[i] = NO_REGS;
555 allows_mem[i] = 0;
557 /* If this operand has no constraints at all, we can
558 conclude nothing about it since anything is valid. */
559 if (*p == 0)
561 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
562 memset (this_op_costs[i], 0, struct_costs_size);
563 continue;
566 /* If this alternative is only relevant when this operand
567 matches a previous operand, we do different things
568 depending on whether this operand is an allocno-reg or not.
569 We must process any modifiers for the operand before we
570 can make this test. */
571 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
572 p++;
574 if (p[0] >= '0' && p[0] <= '0' + i)
576 /* Copy class and whether memory is allowed from the
577 matching alternative. Then perform any needed cost
578 computations and/or adjustments. */
579 j = p[0] - '0';
580 classes[i] = classes[j];
581 allows_mem[i] = allows_mem[j];
582 if (allows_mem[i])
583 insn_allows_mem[i] = 1;
585 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
587 /* If this matches the other operand, we have no
588 added cost and we win. */
589 if (rtx_equal_p (ops[j], op))
590 win = 1;
591 /* If we can put the other operand into a register,
592 add to the cost of this alternative the cost to
593 copy this operand to the register used for the
594 other operand. */
595 else if (classes[j] != NO_REGS)
597 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
598 win = 1;
601 else if (! REG_P (ops[j])
602 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
604 /* This op is an allocno but the one it matches is
605 not. */
607 /* If we can't put the other operand into a
608 register, this alternative can't be used. */
610 if (classes[j] == NO_REGS)
611 alt_fail = 1;
612 /* Otherwise, add to the cost of this alternative
613 the cost to copy the other operand to the hard
614 register used for this operand. */
615 else
616 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
618 else
620 /* The costs of this operand are not the same as the
621 other operand since move costs are not symmetric.
622 Moreover, if we cannot tie them, this alternative
623 needs to do a copy, which is one insn. */
624 struct costs *pp = this_op_costs[i];
625 int *pp_costs = pp->cost;
626 cost_classes_t cost_classes_ptr
627 = regno_cost_classes[REGNO (op)];
628 enum reg_class *cost_classes = cost_classes_ptr->classes;
629 bool in_p = recog_data.operand_type[i] != OP_OUT;
630 bool out_p = recog_data.operand_type[i] != OP_IN;
631 enum reg_class op_class = classes[i];
633 ira_init_register_move_cost_if_necessary (mode);
634 if (! in_p)
636 ira_assert (out_p);
637 if (op_class == NO_REGS)
639 mem_cost = ira_memory_move_cost[mode];
640 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
642 rclass = cost_classes[k];
643 pp_costs[k] = mem_cost[rclass][0] * frequency;
646 else
648 move_out_cost = ira_may_move_out_cost[mode];
649 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
651 rclass = cost_classes[k];
652 pp_costs[k]
653 = move_out_cost[op_class][rclass] * frequency;
657 else if (! out_p)
659 ira_assert (in_p);
660 if (op_class == NO_REGS)
662 mem_cost = ira_memory_move_cost[mode];
663 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
665 rclass = cost_classes[k];
666 pp_costs[k] = mem_cost[rclass][1] * frequency;
669 else
671 move_in_cost = ira_may_move_in_cost[mode];
672 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
674 rclass = cost_classes[k];
675 pp_costs[k]
676 = move_in_cost[rclass][op_class] * frequency;
680 else
682 if (op_class == NO_REGS)
684 mem_cost = ira_memory_move_cost[mode];
685 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
687 rclass = cost_classes[k];
688 pp_costs[k] = ((mem_cost[rclass][0]
689 + mem_cost[rclass][1])
690 * frequency);
693 else
695 move_in_cost = ira_may_move_in_cost[mode];
696 move_out_cost = ira_may_move_out_cost[mode];
697 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
699 rclass = cost_classes[k];
700 pp_costs[k] = ((move_in_cost[rclass][op_class]
701 + move_out_cost[op_class][rclass])
702 * frequency);
707 /* If the alternative actually allows memory, make
708 things a bit cheaper since we won't need an extra
709 insn to load it. */
710 pp->mem_cost
711 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
712 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
713 - allows_mem[i]) * frequency;
715 /* If we have assigned a class to this allocno in
716 our first pass, add a cost to this alternative
717 corresponding to what we would add if this
718 allocno were not in the appropriate class. */
719 if (pref)
721 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
723 if (pref_class == NO_REGS)
724 alt_cost
725 += ((out_p
726 ? ira_memory_move_cost[mode][op_class][0] : 0)
727 + (in_p
728 ? ira_memory_move_cost[mode][op_class][1]
729 : 0));
730 else if (ira_reg_class_intersect
731 [pref_class][op_class] == NO_REGS)
732 alt_cost
733 += ira_register_move_cost[mode][pref_class][op_class];
735 if (REGNO (ops[i]) != REGNO (ops[j])
736 && ! find_reg_note (insn, REG_DEAD, op))
737 alt_cost += 2;
739 p++;
743 /* Scan all the constraint letters. See if the operand
744 matches any of the constraints. Collect the valid
745 register classes and see if this operand accepts
746 memory. */
747 while ((c = *p))
749 switch (c)
751 case '*':
752 /* Ignore the next letter for this pass. */
753 c = *++p;
754 break;
756 case '^':
757 alt_cost += 2;
758 break;
760 case '?':
761 alt_cost += 2;
762 break;
764 case 'g':
765 if (MEM_P (op)
766 || (CONSTANT_P (op)
767 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
768 win = 1;
769 insn_allows_mem[i] = allows_mem[i] = 1;
770 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
771 break;
773 default:
774 enum constraint_num cn = lookup_constraint (p);
775 enum reg_class cl;
776 switch (get_constraint_type (cn))
778 case CT_REGISTER:
779 cl = reg_class_for_constraint (cn);
780 if (cl != NO_REGS)
781 classes[i] = ira_reg_class_subunion[classes[i]][cl];
782 break;
784 case CT_CONST_INT:
785 if (CONST_INT_P (op)
786 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
787 win = 1;
788 break;
790 case CT_MEMORY:
791 /* Every MEM can be reloaded to fit. */
792 insn_allows_mem[i] = allows_mem[i] = 1;
793 if (MEM_P (op))
794 win = 1;
795 break;
797 case CT_ADDRESS:
798 /* Every address can be reloaded to fit. */
799 allows_addr = 1;
800 if (address_operand (op, GET_MODE (op))
801 || constraint_satisfied_p (op, cn))
802 win = 1;
803 /* We know this operand is an address, so we
804 want it to be allocated to a hard register
805 that can be the base of an address,
806 i.e. BASE_REG_CLASS. */
807 classes[i]
808 = ira_reg_class_subunion[classes[i]]
809 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
810 ADDRESS, SCRATCH)];
811 break;
813 case CT_FIXED_FORM:
814 if (constraint_satisfied_p (op, cn))
815 win = 1;
816 break;
818 break;
820 p += CONSTRAINT_LEN (c, p);
821 if (c == ',')
822 break;
825 constraints[i] = p;
827 /* How we account for this operand now depends on whether it
828 is a pseudo register or not. If it is, we first check if
829 any register classes are valid. If not, we ignore this
830 alternative, since we want to assume that all allocnos get
831 allocated for register preferencing. If some register
832 class is valid, compute the costs of moving the allocno
833 into that class. */
834 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
836 if (classes[i] == NO_REGS && ! allows_mem[i])
838 /* We must always fail if the operand is a REG, but
839 we did not find a suitable class and memory is
840 not allowed.
842 Otherwise we may perform an uninitialized read
843 from this_op_costs after the `continue' statement
844 below. */
845 alt_fail = 1;
847 else
849 unsigned int regno = REGNO (op);
850 struct costs *pp = this_op_costs[i];
851 int *pp_costs = pp->cost;
852 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
853 enum reg_class *cost_classes = cost_classes_ptr->classes;
854 bool in_p = recog_data.operand_type[i] != OP_OUT;
855 bool out_p = recog_data.operand_type[i] != OP_IN;
856 enum reg_class op_class = classes[i];
858 ira_init_register_move_cost_if_necessary (mode);
859 if (! in_p)
861 ira_assert (out_p);
862 if (op_class == NO_REGS)
864 mem_cost = ira_memory_move_cost[mode];
865 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
867 rclass = cost_classes[k];
868 pp_costs[k] = mem_cost[rclass][0] * frequency;
871 else
873 move_out_cost = ira_may_move_out_cost[mode];
874 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
876 rclass = cost_classes[k];
877 pp_costs[k]
878 = move_out_cost[op_class][rclass] * frequency;
882 else if (! out_p)
884 ira_assert (in_p);
885 if (op_class == NO_REGS)
887 mem_cost = ira_memory_move_cost[mode];
888 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
890 rclass = cost_classes[k];
891 pp_costs[k] = mem_cost[rclass][1] * frequency;
894 else
896 move_in_cost = ira_may_move_in_cost[mode];
897 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
899 rclass = cost_classes[k];
900 pp_costs[k]
901 = move_in_cost[rclass][op_class] * frequency;
905 else
907 if (op_class == NO_REGS)
909 mem_cost = ira_memory_move_cost[mode];
910 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
912 rclass = cost_classes[k];
913 pp_costs[k] = ((mem_cost[rclass][0]
914 + mem_cost[rclass][1])
915 * frequency);
918 else
920 move_in_cost = ira_may_move_in_cost[mode];
921 move_out_cost = ira_may_move_out_cost[mode];
922 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
924 rclass = cost_classes[k];
925 pp_costs[k] = ((move_in_cost[rclass][op_class]
926 + move_out_cost[op_class][rclass])
927 * frequency);
932 if (op_class == NO_REGS)
933 /* Although we don't need an insn to reload from
934 memory, accessing memory is still usually more
935 expensive than a register. */
936 pp->mem_cost = frequency;
937 else
938 /* If the alternative actually allows memory, make
939 things a bit cheaper since we won't need an
940 extra insn to load it. */
941 pp->mem_cost
942 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
943 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
944 - allows_mem[i]) * frequency;
945 /* If we have assigned a class to this allocno in
946 our first pass, add a cost to this alternative
947 corresponding to what we would add if this
948 allocno were not in the appropriate class. */
949 if (pref)
951 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
953 if (pref_class == NO_REGS)
955 if (op_class != NO_REGS)
956 alt_cost
957 += ((out_p
958 ? ira_memory_move_cost[mode][op_class][0]
959 : 0)
960 + (in_p
961 ? ira_memory_move_cost[mode][op_class][1]
962 : 0));
964 else if (op_class == NO_REGS)
965 alt_cost
966 += ((out_p
967 ? ira_memory_move_cost[mode][pref_class][1]
968 : 0)
969 + (in_p
970 ? ira_memory_move_cost[mode][pref_class][0]
971 : 0));
972 else if (ira_reg_class_intersect[pref_class][op_class]
973 == NO_REGS)
974 alt_cost += (ira_register_move_cost
975 [mode][pref_class][op_class]);
980 /* Otherwise, if this alternative wins, either because we
981 have already determined that or if we have a hard
982 register of the proper class, there is no cost for this
983 alternative. */
984 else if (win || (REG_P (op)
985 && reg_fits_class_p (op, classes[i],
986 0, GET_MODE (op))))
989 /* If registers are valid, the cost of this alternative
990 includes copying the object to and/or from a
991 register. */
992 else if (classes[i] != NO_REGS)
994 if (recog_data.operand_type[i] != OP_OUT)
995 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
997 if (recog_data.operand_type[i] != OP_IN)
998 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
1000 /* The only other way this alternative can be used is if
1001 this is a constant that could be placed into memory. */
1002 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1003 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1004 else
1005 alt_fail = 1;
1008 if (alt_fail)
1009 continue;
1011 op_cost_add = alt_cost * frequency;
1012 /* Finally, update the costs with the information we've
1013 calculated about this alternative. */
1014 for (i = 0; i < n_ops; i++)
1015 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1017 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1018 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1019 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1020 cost_classes_t cost_classes_ptr
1021 = regno_cost_classes[REGNO (ops[i])];
1023 pp->mem_cost = MIN (pp->mem_cost,
1024 (qq->mem_cost + op_cost_add) * scale);
1026 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1027 pp_costs[k]
1028 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1032 if (allocno_p)
1033 for (i = 0; i < n_ops; i++)
1035 ira_allocno_t a;
1036 rtx op = ops[i];
1038 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1039 continue;
1040 a = ira_curr_regno_allocno_map [REGNO (op)];
1041 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1042 ALLOCNO_BAD_SPILL_P (a) = true;
1049 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1050 static inline bool
1051 ok_for_index_p_nonstrict (rtx reg)
1053 unsigned regno = REGNO (reg);
1055 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1058 /* A version of regno_ok_for_base_p for use here, when all
1059 pseudo-registers should count as OK. Arguments as for
1060 regno_ok_for_base_p. */
1061 static inline bool
1062 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1063 enum rtx_code outer_code, enum rtx_code index_code)
1065 unsigned regno = REGNO (reg);
1067 if (regno >= FIRST_PSEUDO_REGISTER)
1068 return true;
1069 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1072 /* Record the pseudo registers we must reload into hard registers in a
1073 subexpression of a memory address, X.
1075 If CONTEXT is 0, we are looking at the base part of an address,
1076 otherwise we are looking at the index part.
1078 MODE and AS are the mode and address space of the memory reference;
1079 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1080 These four arguments are passed down to base_reg_class.
1082 SCALE is twice the amount to multiply the cost by (it is twice so
1083 we can represent half-cost adjustments). */
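/* For example, record_operand_costs and scan_one_insn call this with
   SCALE equal to frequency * 2; the REG case below divides by 2 again,
   so an unambiguous base or index register is charged the full
   frequency-weighted cost, while the ambiguous base/index PLUS case
   recurses with SCALE / 2 to charge each possible role at half
   weight.  */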
1084 static void
1085 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1086 int context, enum rtx_code outer_code,
1087 enum rtx_code index_code, int scale)
1089 enum rtx_code code = GET_CODE (x);
1090 enum reg_class rclass;
1092 if (context == 1)
1093 rclass = INDEX_REG_CLASS;
1094 else
1095 rclass = base_reg_class (mode, as, outer_code, index_code);
1097 switch (code)
1099 case CONST_INT:
1100 case CONST:
1101 case CC0:
1102 case PC:
1103 case SYMBOL_REF:
1104 case LABEL_REF:
1105 return;
1107 case PLUS:
1108 /* When we have an address that is a sum, we must determine
1109 whether registers are "base" or "index" regs. If there is a
1110 sum of two registers, we must choose one to be the "base".
1111 Luckily, we can use the REG_POINTER to make a good choice
1112 most of the time. We only need to do this on machines that
1113 can have two registers in an address and where the base and
1114 index register classes are different.
1116 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1117 but that seems bogus since it should only be set when we are
1118 sure the register is being used as a pointer. */
1120 rtx arg0 = XEXP (x, 0);
1121 rtx arg1 = XEXP (x, 1);
1122 enum rtx_code code0 = GET_CODE (arg0);
1123 enum rtx_code code1 = GET_CODE (arg1);
1125 /* Look inside subregs. */
1126 if (code0 == SUBREG)
1127 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1128 if (code1 == SUBREG)
1129 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1131 /* If this machine only allows one register per address, it
1132 must be in the first operand. */
1133 if (MAX_REGS_PER_ADDRESS == 1)
1134 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1136 /* If index and base registers are the same on this machine,
1137 just record registers in any non-constant operands. We
1138 assume here, as well as in the tests below, that all
1139 addresses are in canonical form. */
1140 else if (INDEX_REG_CLASS
1141 == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1143 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1144 if (! CONSTANT_P (arg1))
1145 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1148 /* If the second operand is a constant integer, it doesn't
1149 change what class the first operand must be. */
1150 else if (CONST_SCALAR_INT_P (arg1))
1151 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1152 /* If the second operand is a symbolic constant, the first
1153 operand must be an index register. */
1154 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1155 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1156 /* If both operands are registers but one is already a hard
1157 register of index or reg-base class, give the other the
1158 class that the hard register is not. */
1159 else if (code0 == REG && code1 == REG
1160 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1161 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1162 || ok_for_index_p_nonstrict (arg0)))
1163 record_address_regs (mode, as, arg1,
1164 ok_for_base_p_nonstrict (arg0, mode, as,
1165 PLUS, REG) ? 1 : 0,
1166 PLUS, REG, scale);
1167 else if (code0 == REG && code1 == REG
1168 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1169 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1170 || ok_for_index_p_nonstrict (arg1)))
1171 record_address_regs (mode, as, arg0,
1172 ok_for_base_p_nonstrict (arg1, mode, as,
1173 PLUS, REG) ? 1 : 0,
1174 PLUS, REG, scale);
1175 /* If one operand is known to be a pointer, it must be the
1176 base with the other operand the index. Likewise if the
1177 other operand is a MULT. */
1178 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1180 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1181 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1183 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1185 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1186 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1188 /* Otherwise, count equal chances that each might be a base or
1189 index register. This case should be rare. */
1190 else
1192 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1193 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1194 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1195 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1198 break;
1200 /* Double the importance of an allocno that is incremented or
1201 decremented, since it would take two extra insns if it ends
1202 up in the wrong place. */
1203 case POST_MODIFY:
1204 case PRE_MODIFY:
1205 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1206 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1207 if (REG_P (XEXP (XEXP (x, 1), 1)))
1208 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1209 2 * scale);
1210 break;
1212 case POST_INC:
1213 case PRE_INC:
1214 case POST_DEC:
1215 case PRE_DEC:
1216 /* Double the importance of an allocno that is incremented or
1217 decremented, since it would take two extra insns if it ends
1218 up in the wrong place. */
1219 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1220 break;
1222 case REG:
1224 struct costs *pp;
1225 int *pp_costs;
1226 enum reg_class i;
1227 int k, regno, add_cost;
1228 cost_classes_t cost_classes_ptr;
1229 enum reg_class *cost_classes;
1230 move_table *move_in_cost;
1232 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1233 break;
1235 regno = REGNO (x);
1236 if (allocno_p)
1237 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1238 pp = COSTS (costs, COST_INDEX (regno));
1239 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1240 if (INT_MAX - add_cost < pp->mem_cost)
1241 pp->mem_cost = INT_MAX;
1242 else
1243 pp->mem_cost += add_cost;
1244 cost_classes_ptr = regno_cost_classes[regno];
1245 cost_classes = cost_classes_ptr->classes;
1246 pp_costs = pp->cost;
1247 ira_init_register_move_cost_if_necessary (Pmode);
1248 move_in_cost = ira_may_move_in_cost[Pmode];
1249 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1251 i = cost_classes[k];
1252 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1253 if (INT_MAX - add_cost < pp_costs[k])
1254 pp_costs[k] = INT_MAX;
1255 else
1256 pp_costs[k] += add_cost;
1259 break;
1261 default:
1263 const char *fmt = GET_RTX_FORMAT (code);
1264 int i;
1265 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1266 if (fmt[i] == 'e')
1267 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1268 scale);
1275 /* Calculate the costs of insn operands. */
1276 static void
1277 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1279 const char *constraints[MAX_RECOG_OPERANDS];
1280 machine_mode modes[MAX_RECOG_OPERANDS];
1281 rtx ops[MAX_RECOG_OPERANDS];
1282 rtx set;
1283 int i;
1285 for (i = 0; i < recog_data.n_operands; i++)
1287 constraints[i] = recog_data.constraints[i];
1288 modes[i] = recog_data.operand_mode[i];
1291 /* If we get here, we are set up to record the costs of all the
1292 operands for this insn. Start by initializing the costs. Then
1293 handle any address registers. Finally record the desired classes
1294 for any allocnos, doing it twice if some pair of operands are
1295 commutative. */
1296 for (i = 0; i < recog_data.n_operands; i++)
1298 memcpy (op_costs[i], init_cost, struct_costs_size);
1300 ops[i] = recog_data.operand[i];
1301 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1302 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1304 if (MEM_P (recog_data.operand[i]))
1305 record_address_regs (GET_MODE (recog_data.operand[i]),
1306 MEM_ADDR_SPACE (recog_data.operand[i]),
1307 XEXP (recog_data.operand[i], 0),
1308 0, MEM, SCRATCH, frequency * 2);
1309 else if (constraints[i][0] == 'p'
1310 || (insn_extra_address_constraint
1311 (lookup_constraint (constraints[i]))))
1312 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1313 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1314 frequency * 2);
1317 /* Check for commutative in a separate loop so everything will have
1318 been initialized. We must do this even if one operand is a
1319 constant--see addsi3 in m68k.md. */
1320 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1321 if (constraints[i][0] == '%')
1323 const char *xconstraints[MAX_RECOG_OPERANDS];
1324 int j;
1326 /* Handle commutative operands by swapping the constraints.
1327 We assume the modes are the same. */
1328 for (j = 0; j < recog_data.n_operands; j++)
1329 xconstraints[j] = constraints[j];
1331 xconstraints[i] = constraints[i+1];
1332 xconstraints[i+1] = constraints[i];
1333 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1334 recog_data.operand, modes,
1335 xconstraints, insn, pref);
1337 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1338 recog_data.operand, modes,
1339 constraints, insn, pref);
1341 /* If this insn is a single set copying operand 1 to operand 0 and
1342 one operand is an allocno with the other a hard reg or an allocno
1343 that prefers a hard register that is in its own register class
1344 then we may want to adjust the cost of that register class to -1.
1346 Avoid the adjustment if the source does not die to avoid
1347 stressing of register allocator by preferencing two colliding
1348 registers into single class.
1350 Also avoid the adjustment if a copy between hard registers of the
1351 class is expensive (ten times the cost of a default copy is
1352 considered arbitrarily expensive). This avoids losing when the
1353 preferred class is very expensive as the source of a copy
1354 instruction. */
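/* In the code below, "adjust the cost ... to -1" concretely means
   setting op_costs[i]->cost[k] to -frequency for a cost class that
   contains the hard register on the other side of the copy and has
   only enough registers for a single value of the mode.  */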
1355 if ((set = single_set (insn)) != NULL_RTX
1356 /* In rare cases the single set insn might have fewer than 2 operands
1357 because the source can be a fixed special reg. */
1358 && recog_data.n_operands > 1
1359 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1361 int regno, other_regno;
1362 rtx dest = SET_DEST (set);
1363 rtx src = SET_SRC (set);
1365 if (GET_CODE (dest) == SUBREG
1366 && (GET_MODE_SIZE (GET_MODE (dest))
1367 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1368 dest = SUBREG_REG (dest);
1369 if (GET_CODE (src) == SUBREG
1370 && (GET_MODE_SIZE (GET_MODE (src))
1371 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1372 src = SUBREG_REG (src);
1373 if (REG_P (src) && REG_P (dest)
1374 && find_regno_note (insn, REG_DEAD, REGNO (src))
1375 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1376 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1377 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1378 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1380 machine_mode mode = GET_MODE (src);
1381 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1382 enum reg_class *cost_classes = cost_classes_ptr->classes;
1383 reg_class_t rclass;
1384 int k, nr;
1386 i = regno == (int) REGNO (src) ? 1 : 0;
1387 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1389 rclass = cost_classes[k];
1390 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1391 && (reg_class_size[(int) rclass]
1392 == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1394 if (reg_class_size[rclass] == 1)
1395 op_costs[i]->cost[k] = -frequency;
1396 else
1398 for (nr = 0;
1399 nr < hard_regno_nregs[other_regno][mode];
1400 nr++)
1401 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
1402 other_regno + nr))
1403 break;
1405 if (nr == hard_regno_nregs[other_regno][mode])
1406 op_costs[i]->cost[k] = -frequency;
1416 /* Process one insn INSN. Scan it and record each time it would save
1417 code to put a certain allocno in a certain class. Return the last
1418 insn processed, so that the scan can be continued from there. */
1419 static rtx_insn *
1420 scan_one_insn (rtx_insn *insn)
1422 enum rtx_code pat_code;
1423 rtx set, note;
1424 int i, k;
1425 bool counted_mem;
1427 if (!NONDEBUG_INSN_P (insn))
1428 return insn;
1430 pat_code = GET_CODE (PATTERN (insn));
1431 if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT)
1432 return insn;
1434 counted_mem = false;
1435 set = single_set (insn);
1436 extract_insn (insn);
1438 /* If this insn loads a parameter from its stack slot, then it
1439 represents a savings, rather than a cost, if the parameter is
1440 stored in memory. Record this fact.
1442 Similarly if we're loading other constants from memory (constant
1443 pool, TOC references, small data areas, etc) and this is the only
1444 assignment to the destination pseudo.
1446 Don't do this if SET_SRC (set) isn't a general operand, if it is
1447 a memory requiring special instructions to load it, decreasing
1448 mem_cost might result in it being loaded using the specialized
1449 instruction into a register, then stored into stack and loaded
1450 again from the stack. See PR52208.
1452 Don't do this if SET_SRC (set) has side effect. See PR56124. */
1453 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1454 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1455 && ((MEM_P (XEXP (note, 0))
1456 && !side_effects_p (SET_SRC (set)))
1457 || (CONSTANT_P (XEXP (note, 0))
1458 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1459 XEXP (note, 0))
1460 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1461 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set))))
1463 enum reg_class cl = GENERAL_REGS;
1464 rtx reg = SET_DEST (set);
1465 int num = COST_INDEX (REGNO (reg));
1467 COSTS (costs, num)->mem_cost
1468 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1469 record_address_regs (GET_MODE (SET_SRC (set)),
1470 MEM_ADDR_SPACE (SET_SRC (set)),
1471 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1472 frequency * 2);
1473 counted_mem = true;
1476 record_operand_costs (insn, pref);
1478 /* Now add the cost for each operand to the total costs for its
1479 allocno. */
1480 for (i = 0; i < recog_data.n_operands; i++)
1481 if (REG_P (recog_data.operand[i])
1482 && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1484 int regno = REGNO (recog_data.operand[i]);
1485 struct costs *p = COSTS (costs, COST_INDEX (regno));
1486 struct costs *q = op_costs[i];
1487 int *p_costs = p->cost, *q_costs = q->cost;
1488 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1489 int add_cost;
1491 /* If we already accounted for the memory "cost" above, don't
1492 do so again. */
1493 if (!counted_mem)
1495 add_cost = q->mem_cost;
1496 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1497 p->mem_cost = INT_MAX;
1498 else
1499 p->mem_cost += add_cost;
1501 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1503 add_cost = q_costs[k];
1504 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1505 p_costs[k] = INT_MAX;
1506 else
1507 p_costs[k] += add_cost;
1511 return insn;
1516 /* Print allocnos costs to file F. */
1517 static void
1518 print_allocno_costs (FILE *f)
1520 int k;
1521 ira_allocno_t a;
1522 ira_allocno_iterator ai;
1524 ira_assert (allocno_p);
1525 fprintf (f, "\n");
1526 FOR_EACH_ALLOCNO (a, ai)
1528 int i, rclass;
1529 basic_block bb;
1530 int regno = ALLOCNO_REGNO (a);
1531 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1532 enum reg_class *cost_classes = cost_classes_ptr->classes;
1534 i = ALLOCNO_NUM (a);
1535 fprintf (f, " a%d(r%d,", i, regno);
1536 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1537 fprintf (f, "b%d", bb->index);
1538 else
1539 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1540 fprintf (f, ") costs:");
1541 for (k = 0; k < cost_classes_ptr->num; k++)
1543 rclass = cost_classes[k];
1544 fprintf (f, " %s:%d", reg_class_names[rclass],
1545 COSTS (costs, i)->cost[k]);
1546 if (flag_ira_region == IRA_REGION_ALL
1547 || flag_ira_region == IRA_REGION_MIXED)
1548 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1550 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1551 if (flag_ira_region == IRA_REGION_ALL
1552 || flag_ira_region == IRA_REGION_MIXED)
1553 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1554 fprintf (f, "\n");
1558 /* Print pseudo costs to file F. */
1559 static void
1560 print_pseudo_costs (FILE *f)
1562 int regno, k;
1563 int rclass;
1564 cost_classes_t cost_classes_ptr;
1565 enum reg_class *cost_classes;
1567 ira_assert (! allocno_p);
1568 fprintf (f, "\n");
1569 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1571 if (REG_N_REFS (regno) <= 0)
1572 continue;
1573 cost_classes_ptr = regno_cost_classes[regno];
1574 cost_classes = cost_classes_ptr->classes;
1575 fprintf (f, " r%d costs:", regno);
1576 for (k = 0; k < cost_classes_ptr->num; k++)
1578 rclass = cost_classes[k];
1579 fprintf (f, " %s:%d", reg_class_names[rclass],
1580 COSTS (costs, regno)->cost[k]);
1582 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1586 /* Scan the insns of basic block BB to update the allocno
1587 costs. */
1588 static void
1589 process_bb_for_costs (basic_block bb)
1591 rtx_insn *insn;
1593 frequency = REG_FREQ_FROM_BB (bb);
1594 if (frequency == 0)
1595 frequency = 1;
1596 FOR_BB_INSNS (bb, insn)
1597 insn = scan_one_insn (insn);
1600 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1601 costs. */
1602 static void
1603 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1605 basic_block bb;
1607 bb = loop_tree_node->bb;
1608 if (bb != NULL)
1609 process_bb_for_costs (bb);
1612 /* Find costs of register classes and memory for allocnos or pseudos
1613 and their best costs. Set up preferred, alternative and allocno
1614 classes for pseudos. */
1615 static void
1616 find_costs_and_classes (FILE *dump_file)
1618 int i, k, start, max_cost_classes_num;
1619 int pass;
1620 basic_block bb;
1621 enum reg_class *regno_best_class, new_class;
1623 init_recog ();
1624 regno_best_class
1625 = (enum reg_class *) ira_allocate (max_reg_num ()
1626 * sizeof (enum reg_class));
1627 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1628 regno_best_class[i] = NO_REGS;
1629 if (!resize_reg_info () && allocno_p
1630 && pseudo_classes_defined_p && flag_expensive_optimizations)
1632 ira_allocno_t a;
1633 ira_allocno_iterator ai;
1635 pref = pref_buffer;
1636 max_cost_classes_num = 1;
1637 FOR_EACH_ALLOCNO (a, ai)
1639 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1640 setup_regno_cost_classes_by_aclass
1641 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1642 max_cost_classes_num
1643 = MAX (max_cost_classes_num,
1644 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1646 start = 1;
1648 else
1650 pref = NULL;
1651 max_cost_classes_num = ira_important_classes_num;
1652 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1653 if (regno_reg_rtx[i] != NULL_RTX)
1654 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1655 else
1656 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1657 start = 0;
1659 if (allocno_p)
1660 /* Clear the flag for the next compiled function. */
1661 pseudo_classes_defined_p = false;
1662 /* Normally we scan the insns once and determine the best class to
1663 use for each allocno. However, if -fexpensive-optimizations is
1664 on, we do so twice, the second time using the tentative best
1665 classes to guide the selection. */
1666 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1668 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1669 fprintf (dump_file,
1670 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1672 if (pass != start)
1674 max_cost_classes_num = 1;
1675 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1677 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1678 max_cost_classes_num
1679 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1683 struct_costs_size
1684 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
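/* The "- 1" compensates for the cost[1] placeholder already counted in
   sizeof (struct costs), so each element gets exactly
   max_cost_classes_num cost slots.  */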
1685 /* Zero out our accumulation of the cost of each class for each
1686 allocno. */
1687 memset (costs, 0, cost_elements_num * struct_costs_size);
1689 if (allocno_p)
1691 /* Scan the instructions and record each time it would save code
1692 to put a certain allocno in a certain class. */
1693 ira_traverse_loop_tree (true, ira_loop_tree_root,
1694 process_bb_node_for_costs, NULL);
1696 memcpy (total_allocno_costs, costs,
1697 max_struct_costs_size * ira_allocnos_num);
1699 else
1701 basic_block bb;
1703 FOR_EACH_BB_FN (bb, cfun)
1704 process_bb_for_costs (bb);
1707 if (pass == 0)
1708 pref = pref_buffer;
1710 /* Now for each allocno look at how desirable each class is and
1711 find which class is preferred. */
1712 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1714 ira_allocno_t a, parent_a;
1715 int rclass, a_num, parent_a_num, add_cost;
1716 ira_loop_tree_node_t parent;
1717 int best_cost, allocno_cost;
1718 enum reg_class best, alt_class;
1719 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1720 enum reg_class *cost_classes = cost_classes_ptr->classes;
1721 int *i_costs = temp_costs->cost;
1722 int i_mem_cost;
1723 int equiv_savings = regno_equiv_gains[i];
1725 if (! allocno_p)
1727 if (regno_reg_rtx[i] == NULL_RTX)
1728 continue;
1729 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1730 i_mem_cost = temp_costs->mem_cost;
1732 else
1734 if (ira_regno_allocno_map[i] == NULL)
1735 continue;
1736 memset (temp_costs, 0, struct_costs_size);
1737 i_mem_cost = 0;
1738 /* Find cost of all allocnos with the same regno. */
1739 for (a = ira_regno_allocno_map[i];
1740 a != NULL;
1741 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1743 int *a_costs, *p_costs;
1745 a_num = ALLOCNO_NUM (a);
1746 if ((flag_ira_region == IRA_REGION_ALL
1747 || flag_ira_region == IRA_REGION_MIXED)
1748 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1749 && (parent_a = parent->regno_allocno_map[i]) != NULL
1750 /* There are no caps yet. */
1751 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1752 (a)->border_allocnos,
1753 ALLOCNO_NUM (a)))
1755 /* Propagate costs to upper levels in the region
1756 tree. */
1757 parent_a_num = ALLOCNO_NUM (parent_a);
1758 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1759 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1760 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1762 add_cost = a_costs[k];
1763 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1764 p_costs[k] = INT_MAX;
1765 else
1766 p_costs[k] += add_cost;
1768 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1769 if (add_cost > 0
1770 && (INT_MAX - add_cost
1771 < COSTS (total_allocno_costs,
1772 parent_a_num)->mem_cost))
1773 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1774 = INT_MAX;
1775 else
1776 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1777 += add_cost;
1779 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1780 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1782 a_costs = COSTS (costs, a_num)->cost;
1783 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1785 add_cost = a_costs[k];
1786 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1787 i_costs[k] = INT_MAX;
1788 else
1789 i_costs[k] += add_cost;
1791 add_cost = COSTS (costs, a_num)->mem_cost;
1792 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1793 i_mem_cost = INT_MAX;
1794 else
1795 i_mem_cost += add_cost;
1798 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1799 i_mem_cost = 0;
1800 else if (equiv_savings < 0)
1801 i_mem_cost = -equiv_savings;
1802 else if (equiv_savings > 0)
1804 i_mem_cost = 0;
1805 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1806 i_costs[k] += equiv_savings;
1809 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1810 best = ALL_REGS;
1811 alt_class = NO_REGS;
1812 /* Find best common class for all allocnos with the same
1813 regno. */
1814 for (k = 0; k < cost_classes_ptr->num; k++)
1816 rclass = cost_classes[k];
1817 if (i_costs[k] < best_cost)
1819 best_cost = i_costs[k];
1820 best = (enum reg_class) rclass;
1822 else if (i_costs[k] == best_cost)
1823 best = ira_reg_class_subunion[best][rclass];
1824 if (pass == flag_expensive_optimizations
1825 /* We still prefer registers to memory even at this
1826 stage if their costs are the same. We will make
1827 a final decision during assigning hard registers
1828 when we have all info including more accurate
1829 costs which might be affected by assigning hard
1830 registers to other pseudos because the pseudos
1831 involved in moves can be coalesced. */
1832 && i_costs[k] <= i_mem_cost
1833 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1834 > reg_class_size[alt_class]))
1835 alt_class = reg_class_subunion[alt_class][rclass];
1837 alt_class = ira_allocno_class_translate[alt_class];
1838 if (best_cost > i_mem_cost
1839 && ! non_spilled_static_chain_regno_p (i))
1840 regno_aclass[i] = NO_REGS;
1841 else if (!optimize && !targetm.class_likely_spilled_p (best))
1842 /* Registers in the alternative class are likely to need
1843 longer or slower sequences than registers in the best class.
1844 When optimizing we make some effort to use the best class
1845 over the alternative class where possible, but at -O0 we
1846 effectively give the alternative class equal weight.
1847 We then run the risk of using slower alternative registers
1848 when plenty of registers from the best class are still free.
1849 This is especially true because live ranges tend to be very
1850 short in -O0 code and so register pressure tends to be low.
1852 Avoid that by ignoring the alternative class if the best
1853 class has plenty of registers. */
1854 regno_aclass[i] = best;
1855 else
1857 /* Make the common class the biggest class of best and
1858 alt_class. */
1859 regno_aclass[i]
1860 = ira_reg_class_superunion[best][alt_class];
1861 ira_assert (regno_aclass[i] != NO_REGS
1862 && ira_reg_allocno_class_p[regno_aclass[i]]);
1864 if ((new_class
1865 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1866 (i, regno_aclass[i]))) != regno_aclass[i])
1868 regno_aclass[i] = new_class;
1869 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1870 reg_class_contents[best]))
1871 best = new_class;
1872 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1873 reg_class_contents[alt_class]))
1874 alt_class = new_class;
1876 if (pass == flag_expensive_optimizations)
1878 if (best_cost > i_mem_cost
1879 /* Do not assign NO_REGS to static chain pointer
1880 pseudo when non-local goto is used. */
1881 && ! non_spilled_static_chain_regno_p (i))
1882 best = alt_class = NO_REGS;
1883 else if (best == alt_class)
1884 alt_class = NO_REGS;
1885 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1886 if ((!allocno_p || internal_flag_ira_verbose > 2)
1887 && dump_file != NULL)
1888 fprintf (dump_file,
1889 " r%d: preferred %s, alternative %s, allocno %s\n",
1890 i, reg_class_names[best], reg_class_names[alt_class],
1891 reg_class_names[regno_aclass[i]]);
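/* With dumping enabled this produces lines such as
     r123: preferred GENERAL_REGS, alternative ALL_REGS, allocno GENERAL_REGS
   where the register number and class names are purely illustrative.  */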
1893 regno_best_class[i] = best;
1894 if (! allocno_p)
1896 pref[i] = (best_cost > i_mem_cost
1897 && ! non_spilled_static_chain_regno_p (i)
1898 ? NO_REGS : best);
1899 continue;
1901 for (a = ira_regno_allocno_map[i];
1902 a != NULL;
1903 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1905 enum reg_class aclass = regno_aclass[i];
1906 int a_num = ALLOCNO_NUM (a);
1907 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1908 int *a_costs = COSTS (costs, a_num)->cost;
1910 if (aclass == NO_REGS)
1911 best = NO_REGS;
1912 else
1914 /* Find the best class that is a subset of the common
1915 class. */
1916 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1917 allocno_cost = best_cost;
1918 best = ALL_REGS;
1919 for (k = 0; k < cost_classes_ptr->num; k++)
1921 rclass = cost_classes[k];
1922 if (! ira_class_subset_p[rclass][aclass])
1923 continue;
1924 if (total_a_costs[k] < best_cost)
1926 best_cost = total_a_costs[k];
1927 allocno_cost = a_costs[k];
1928 best = (enum reg_class) rclass;
1930 else if (total_a_costs[k] == best_cost)
1932 best = ira_reg_class_subunion[best][rclass];
1933 allocno_cost = MAX (allocno_cost, a_costs[k]);
1936 ALLOCNO_CLASS_COST (a) = allocno_cost;
1938 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1939 && (pass == 0 || pref[a_num] != best))
1941 fprintf (dump_file, " a%d (r%d,", a_num, i);
1942 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1943 fprintf (dump_file, "b%d", bb->index);
1944 else
1945 fprintf (dump_file, "l%d",
1946 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1947 fprintf (dump_file, ") best %s, allocno %s\n",
1948 reg_class_names[best],
1949 reg_class_names[aclass]);
1951 pref[a_num] = best;
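/* On the last pass, if the preferred class is so small that a single
   value of this mode needs all of its hard registers, there is
   effectively only one way to allocate within it; record an explicit
   preference for its first hard register, weighted by the cost
   advantage of the preferred class expressed as a number of register
   moves.  */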
1952 if (pass == flag_expensive_optimizations && best != aclass
1953 && ira_class_hard_regs_num[best] > 0
1954 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1955 >= ira_class_hard_regs_num[best]))
1957 int ind = cost_classes_ptr->index[aclass];
1959 ira_assert (ind >= 0);
1960 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1961 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1962 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
1963 / (ira_register_move_cost
1964 [ALLOCNO_MODE (a)][best][aclass]));
1965 for (k = 0; k < cost_classes_ptr->num; k++)
1966 if (ira_class_subset_p[cost_classes[k]][best])
1967 a_costs[k] = a_costs[ind];
1972 if (internal_flag_ira_verbose > 4 && dump_file)
1974 if (allocno_p)
1975 print_allocno_costs (dump_file);
1976 else
1977 print_pseudo_costs (dump_file);
1978 fprintf (dump_file,"\n");
1981 ira_free (regno_best_class);
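/* At this point regno_aclass[] and regno_best_class[] hold the classes
   computed above, pref[] holds the preferred class of each allocno (or
   pseudo), and setup_reg_classes has recorded the preferences.  */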
1986 /* Process moves involving hard regs to modify allocno hard register
1987 costs. We can do this only after determining allocno class. If a
1988 hard register forms a register class, then moves with the hard
1989 register are already taken into account in class costs for the
1990 allocno. */
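/* For example, a copy such as (set (reg:SI 130) (reg:SI 1)) between a
   pseudo and hard register 1 adds a preference for hard register 1 to
   the pseudo's allocno and its ancestors in the loop tree, and lowers
   that register's cost for the allocno in proportion to the block's
   execution frequency.  The register numbers here are illustrative.  */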
1991 static void
1992 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
1994 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
1995 bool to_p;
1996 ira_allocno_t a, curr_a;
1997 ira_loop_tree_node_t curr_loop_tree_node;
1998 enum reg_class rclass;
1999 basic_block bb;
2000 rtx_insn *insn;
2001 rtx set, src, dst;
2003 bb = loop_tree_node->bb;
2004 if (bb == NULL)
2005 return;
2006 freq = REG_FREQ_FROM_BB (bb);
2007 if (freq == 0)
2008 freq = 1;
2009 FOR_BB_INSNS (bb, insn)
2011 if (!NONDEBUG_INSN_P (insn))
2012 continue;
2013 set = single_set (insn);
2014 if (set == NULL_RTX)
2015 continue;
2016 dst = SET_DEST (set);
2017 src = SET_SRC (set);
2018 if (! REG_P (dst) || ! REG_P (src))
2019 continue;
2020 dst_regno = REGNO (dst);
2021 src_regno = REGNO (src);
2022 if (dst_regno >= FIRST_PSEUDO_REGISTER
2023 && src_regno < FIRST_PSEUDO_REGISTER)
2025 hard_regno = src_regno;
2026 a = ira_curr_regno_allocno_map[dst_regno];
2027 to_p = true;
2029 else if (src_regno >= FIRST_PSEUDO_REGISTER
2030 && dst_regno < FIRST_PSEUDO_REGISTER)
2032 hard_regno = dst_regno;
2033 a = ira_curr_regno_allocno_map[src_regno];
2034 to_p = false;
2036 else
2037 continue;
2038 rclass = ALLOCNO_CLASS (a);
2039 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2040 continue;
2041 i = ira_class_hard_reg_index[rclass][hard_regno];
2042 if (i < 0)
2043 continue;
2044 a_regno = ALLOCNO_REGNO (a);
2045 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2046 curr_loop_tree_node != NULL;
2047 curr_loop_tree_node = curr_loop_tree_node->parent)
2048 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2049 ira_add_allocno_pref (curr_a, hard_regno, freq);
2051 int cost;
2052 enum reg_class hard_reg_class;
2053 machine_mode mode;
2055 mode = ALLOCNO_MODE (a);
2056 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2057 ira_init_register_move_cost_if_necessary (mode);
2058 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2059 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2060 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2061 ALLOCNO_CLASS_COST (a));
2062 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2063 rclass, 0);
2064 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2065 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2066 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2067 ALLOCNO_HARD_REG_COSTS (a)[i]);
2072 /* After we find hard register and memory costs for allocnos, define
2073 their classes and adjust the hard register costs to account for insns
2074 moving allocnos to/from hard registers. */
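/* Where the preferred class differs from the allocno class, the
   per-class cost is expanded below into a vector with one entry per
   hard register of the allocno class: registers inside the preferred
   class are costed at the allocno class cost, while the remaining ones
   get the cost recorded for their own register class.  */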
2075 static void
2076 setup_allocno_class_and_costs (void)
2078 int i, j, n, regno, hard_regno, num;
2079 int *reg_costs;
2080 enum reg_class aclass, rclass;
2081 ira_allocno_t a;
2082 ira_allocno_iterator ai;
2083 cost_classes_t cost_classes_ptr;
2085 ira_assert (allocno_p);
2086 FOR_EACH_ALLOCNO (a, ai)
2088 i = ALLOCNO_NUM (a);
2089 regno = ALLOCNO_REGNO (a);
2090 aclass = regno_aclass[regno];
2091 cost_classes_ptr = regno_cost_classes[regno];
2092 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2093 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2094 ira_set_allocno_class (a, aclass);
2095 if (aclass == NO_REGS)
2096 continue;
2097 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2099 n = ira_class_hard_regs_num[aclass];
2100 ALLOCNO_HARD_REG_COSTS (a)
2101 = reg_costs = ira_allocate_cost_vector (aclass);
2102 for (j = n - 1; j >= 0; j--)
2104 hard_regno = ira_class_hard_regs[aclass][j];
2105 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2106 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2107 else
2109 rclass = REGNO_REG_CLASS (hard_regno);
2110 num = cost_classes_ptr->index[rclass];
2111 if (num < 0)
2113 num = cost_classes_ptr->hard_regno_index[hard_regno];
2114 ira_assert (num >= 0);
2116 reg_costs[j] = COSTS (costs, i)->cost[num];
2121 if (optimize)
2122 ira_traverse_loop_tree (true, ira_loop_tree_root,
2123 process_bb_node_for_hard_reg_moves, NULL);
2128 /* Function called once during the compiler run. */
2129 void
2130 ira_init_costs_once (void)
2132 int i;
2134 init_cost = NULL;
2135 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2137 op_costs[i] = NULL;
2138 this_op_costs[i] = NULL;
2140 temp_costs = NULL;
2143 /* Free allocated temporary cost vectors. */
2144 void
2145 target_ira_int::free_ira_costs ()
2147 int i;
2149 free (x_init_cost);
2150 x_init_cost = NULL;
2151 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2153 free (x_op_costs[i]);
2154 free (x_this_op_costs[i]);
2155 x_op_costs[i] = x_this_op_costs[i] = NULL;
2157 free (x_temp_costs);
2158 x_temp_costs = NULL;
2161 /* This is called each time register-related information is
2162 changed. */
2163 void
2164 ira_init_costs (void)
2166 int i;
2168 this_target_ira_int->free_ira_costs ();
2169 max_struct_costs_size
2170 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2171 /* Don't use ira_allocate because vectors live through several IRA
2172 calls. */
2173 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
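/* Fill init_cost with a uniformly very large default (1000000); it is
   used as the starting value for per-operand costs, so any cost
   actually recorded for an operand normally looks cheaper.  */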
2174 init_cost->mem_cost = 1000000;
2175 for (i = 0; i < ira_important_classes_num; i++)
2176 init_cost->cost[i] = 1000000;
2177 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2179 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2180 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2182 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2187 /* Common initialization function for ira_costs and
2188 ira_set_pseudo_classes. */
2189 static void
2190 init_costs (void)
2192 init_subregs_of_mode ();
2193 costs = (struct costs *) ira_allocate (max_struct_costs_size
2194 * cost_elements_num);
2195 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2196 * cost_elements_num);
2197 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2198 * max_reg_num ());
2199 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2200 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2203 /* Common finalization function for ira_costs and
2204 ira_set_pseudo_classes. */
2205 static void
2206 finish_costs (void)
2208 finish_subregs_of_mode ();
2209 ira_free (regno_equiv_gains);
2210 ira_free (regno_aclass);
2211 ira_free (pref_buffer);
2212 ira_free (costs);
2215 /* Entry function which defines register class, memory and hard
2216 register costs for each allocno. */
2217 void
2218 ira_costs (void)
2220 allocno_p = true;
2221 cost_elements_num = ira_allocnos_num;
2222 init_costs ();
2223 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2224 * ira_allocnos_num);
2225 initiate_regno_cost_classes ();
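/* calculate_elim_costs_all_insns estimates, through
   ira_adjust_equiv_reg_cost below, how much would be gained by
   replacing each pseudo with its equivalence; find_costs_and_classes
   folds those gains into the memory (and class) costs.  */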
2226 calculate_elim_costs_all_insns ();
2227 find_costs_and_classes (ira_dump_file);
2228 setup_allocno_class_and_costs ();
2229 finish_regno_cost_classes ();
2230 finish_costs ();
2231 ira_free (total_allocno_costs);
2234 /* Entry function which defines classes for pseudos.
2235 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2236 void
2237 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2239 allocno_p = false;
2240 internal_flag_ira_verbose = flag_ira_verbose;
2241 cost_elements_num = max_reg_num ();
2242 init_costs ();
2243 initiate_regno_cost_classes ();
2244 find_costs_and_classes (dump_file);
2245 finish_regno_cost_classes ();
2246 if (define_pseudo_classes)
2247 pseudo_classes_defined_p = true;
2249 finish_costs ();
2254 /* Change hard register costs for allocnos which live through
2255 function calls. This is called only after all intersecting calls
2256 have been found while building allocno live ranges. */
2257 void
2258 ira_tune_allocno_costs (void)
2260 int j, n, regno;
2261 int cost, min_cost, *reg_costs;
2262 enum reg_class aclass, rclass;
2263 machine_mode mode;
2264 ira_allocno_t a;
2265 ira_allocno_iterator ai;
2266 ira_allocno_object_iterator oi;
2267 ira_object_t obj;
2268 bool skip_p;
2269 HARD_REG_SET *crossed_calls_clobber_regs;
2271 FOR_EACH_ALLOCNO (a, ai)
2273 aclass = ALLOCNO_CLASS (a);
2274 if (aclass == NO_REGS)
2275 continue;
2276 mode = ALLOCNO_MODE (a);
2277 n = ira_class_hard_regs_num[aclass];
2278 min_cost = INT_MAX;
2279 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2280 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2282 ira_allocate_and_set_costs
2283 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2284 ALLOCNO_CLASS_COST (a));
2285 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2286 for (j = n - 1; j >= 0; j--)
2288 regno = ira_class_hard_regs[aclass][j];
2289 skip_p = false;
2290 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2292 if (ira_hard_reg_set_intersection_p (regno, mode,
2293 OBJECT_CONFLICT_HARD_REGS
2294 (obj)))
2296 skip_p = true;
2297 break;
2300 if (skip_p)
2301 continue;
2302 rclass = REGNO_REG_CLASS (regno);
2303 cost = 0;
2304 crossed_calls_clobber_regs
2305 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
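/* A register clobbered by a crossed call would have to be saved and
   restored around each such call, so charge one memory store plus one
   load per call, scaled by the call frequency.  */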
2306 if (ira_hard_reg_set_intersection_p (regno, mode,
2307 *crossed_calls_clobber_regs)
2308 && (ira_hard_reg_set_intersection_p (regno, mode,
2309 call_used_reg_set)
2310 || HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
2311 cost += (ALLOCNO_CALL_FREQ (a)
2312 * (ira_memory_move_cost[mode][rclass][0]
2313 + ira_memory_move_cost[mode][rclass][1]));
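/* Targets may additionally define IRA_HARD_REGNO_ADD_COST_MULTIPLIER
   to make particular hard registers somewhat more expensive; that
   extra cost scales with the allocno's overall frequency.  */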
2314 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2315 cost += ((ira_memory_move_cost[mode][rclass][0]
2316 + ira_memory_move_cost[mode][rclass][1])
2317 * ALLOCNO_FREQ (a)
2318 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2319 #endif
2320 if (INT_MAX - cost < reg_costs[j])
2321 reg_costs[j] = INT_MAX;
2322 else
2323 reg_costs[j] += cost;
2324 if (min_cost > reg_costs[j])
2325 min_cost = reg_costs[j];
2328 if (min_cost != INT_MAX)
2329 ALLOCNO_CLASS_COST (a) = min_cost;
2331 /* Some targets allow pseudos to be allocated to unaligned sequences
2332 of hard registers. However, selecting an unaligned sequence can
2333 unnecessarily restrict later allocations. So increase the cost of
2334 unaligned hard regs to encourage the use of aligned hard regs. */
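/* For example, when a value of the allocno's mode needs two hard
   registers, every odd-numbered start register gets an extra
   ALLOCNO_FREQ added to its cost below.  */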
2336 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2338 if (nregs > 1)
2340 ira_allocate_and_set_costs
2341 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2342 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2343 for (j = n - 1; j >= 0; j--)
2345 regno = ira_non_ordered_class_hard_regs[aclass][j];
2346 if ((regno % nregs) != 0)
2348 int index = ira_class_hard_reg_index[aclass][regno];
2349 ira_assert (index != -1);
2350 reg_costs[index] += ALLOCNO_FREQ (a);
2358 /* Add COST to the estimated gain for eliminating REGNO with its
2359 equivalence. If COST is zero, record that no such elimination is
2360 possible. */
2362 void
2363 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2365 if (cost == 0)
2366 regno_equiv_gains[regno] = 0;
2367 else
2368 regno_equiv_gains[regno] += cost;
2371 void
2372 ira_costs_c_finalize (void)
2374 this_target_ira_int->free_ira_costs ();