1 /* Integrated Register Allocator (IRA) intercommunication header file.
2 Copyright (C) 2006, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "cfgloop.h"
23 #include "ira.h"
24 #include "alloc-pool.h"
26 /* To provide consistency in naming, all IRA external variables,
27 functions, common typedefs start with prefix ira_. */
29 #ifdef ENABLE_CHECKING
30 #define ENABLE_IRA_CHECKING
31 #endif
33 #ifdef ENABLE_IRA_CHECKING
34 #define ira_assert(c) gcc_assert (c)
35 #else
36 /* Always define and include C, so that warnings for empty body in an
37 ‘if’ statement and unused variable do not occur. */
38 #define ira_assert(c) ((void)(0 && (c)))
39 #endif
41 /* Compute register frequency from edge frequency FREQ.  It is
42 analogous to REG_FREQ_FROM_BB.  When optimizing for size, or when
43 profile driven feedback is available and the function is never
44 executed, the frequency is always REG_FREQ_MAX.  Otherwise rescale
45 the edge frequency. */
46 #define REG_FREQ_FROM_EDGE_FREQ(freq) \
47 (optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
48 ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
49 ? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
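/* Usage sketch (illustrative only, not part of the interface): outside of
   the size-optimized and never-executed cases the macro rescales the edge
   frequency linearly and clamps the result to at least 1, e.g.

     int full = REG_FREQ_FROM_EDGE_FREQ (BB_FREQ_MAX);   yields REG_FREQ_MAX
     int none = REG_FREQ_FROM_EDGE_FREQ (0);             yields 1  */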
51 /* All natural loops. */
52 extern struct loops ira_loops;
54 /* A modified value of flag `-fira-verbose' used internally. */
55 extern int internal_flag_ira_verbose;
57 /* Dump file of the allocator if it is not NULL. */
58 extern FILE *ira_dump_file;
60 /* Typedefs for pointers to allocno live range, allocno, and copy of
61 allocnos. */
62 typedef struct live_range *live_range_t;
63 typedef struct ira_allocno *ira_allocno_t;
64 typedef struct ira_allocno_copy *ira_copy_t;
65 typedef struct ira_object *ira_object_t;
67 /* Definition of vector of allocnos and copies. */
68 DEF_VEC_P(ira_allocno_t);
69 DEF_VEC_ALLOC_P(ira_allocno_t, heap);
70 DEF_VEC_P(ira_object_t);
71 DEF_VEC_ALLOC_P(ira_object_t, heap);
72 DEF_VEC_P(ira_copy_t);
73 DEF_VEC_ALLOC_P(ira_copy_t, heap);
75 /* Typedef for pointer to the subsequent structure. */
76 typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
78 /* In the general case, IRA is a regional allocator.  The regions are
79 nested and form a tree.  Currently regions are natural loops.  The
80 following structure describes a loop tree node (representing a basic
81 block or a loop).  We need such a tree because the loop tree from
82 cfgloop.h is not convenient for the optimization: basic blocks are
83 not a part of the tree from cfgloop.h. We also use the nodes for
84 storing additional information about basic blocks/loops for the
85 register allocation purposes. */
86 struct ira_loop_tree_node
87 {
88 /* The node represents basic block if children == NULL. */
89 basic_block bb; /* NULL for loop. */
90 struct loop *loop; /* NULL for BB. */
91 /* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
92 SUBLOOP_NEXT is always NULL for BBs. */
93 ira_loop_tree_node_t subloop_next, next;
94 /* CHILDREN/SUBLOOPS is the first node/loop-node immediately inside
95 the node. They are NULL for BBs. */
96 ira_loop_tree_node_t subloops, children;
97 /* The node immediately containing given node. */
98 ira_loop_tree_node_t parent;
100 /* Loop level in range [0, ira_loop_tree_height). */
101 int level;
103 /* All the following members are defined only for nodes representing
104 loops. */
106 /* True if the loop was marked for removal from the register
107 allocation. */
108 bool to_remove_p;
110 /* Allocnos in the loop corresponding to their regnos. If it is
111 NULL the loop does not form a separate register allocation region
112 (e.g. because it has abnormal enter/exit edges and we can not put
113 code for register shuffling on the edges if a different
114 allocation is used for a pseudo-register on different sides of
115 the edges).  Caps are not in the map (remember we can have more
116 than one cap with the same regno in a region). */
117 ira_allocno_t *regno_allocno_map;
119 /* True if there is an entry to the given loop not from its parent (or
120 grandparent) basic block.  For example, this can happen for two
121 adjacent loops inside another loop. */
122 bool entered_from_non_parent_p;
124 /* Maximal register pressure inside loop for given register class
125 (defined only for the cover classes). */
126 int reg_pressure[N_REG_CLASSES];
128 /* Numbers of allocnos referred or living in the loop node (except
129 for its subloops). */
130 bitmap all_allocnos;
132 /* Numbers of allocnos living at the loop borders. */
133 bitmap border_allocnos;
135 /* Regnos of pseudos modified in the loop node (including its
136 subloops). */
137 bitmap modified_regnos;
139 /* Numbers of copies referred in the corresponding loop. */
140 bitmap local_copies;
141 };
143 /* The root of the loop tree corresponding to the all function. */
144 extern ira_loop_tree_node_t ira_loop_tree_root;
146 /* Height of the loop tree. */
147 extern int ira_loop_tree_height;
149 /* All nodes representing basic blocks are referred through the
150 following array. We can not use basic block member `aux' for this
151 because it is used for insertion of insns on edges. */
152 extern ira_loop_tree_node_t ira_bb_nodes;
154 /* Two access macros to the nodes representing basic blocks. */
155 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
156 #define IRA_BB_NODE_BY_INDEX(index) __extension__ \
157 (({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]); \
158 if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
159 { \
160 fprintf (stderr, \
161 "\n%s: %d: error in %s: it is not a block node\n", \
162 __FILE__, __LINE__, __FUNCTION__); \
163 gcc_unreachable (); \
164 } \
165 _node; }))
166 #else
167 #define IRA_BB_NODE_BY_INDEX(index) (&ira_bb_nodes[index])
168 #endif
170 #define IRA_BB_NODE(bb) IRA_BB_NODE_BY_INDEX ((bb)->index)
172 /* All nodes representing loops are referred through the following
173 array. */
174 extern ira_loop_tree_node_t ira_loop_nodes;
176 /* Two access macros to the nodes representing loops. */
177 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
178 #define IRA_LOOP_NODE_BY_INDEX(index) __extension__ \
179 (({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]);\
180 if (_node->children == NULL || _node->bb != NULL || _node->loop == NULL)\
181 { \
182 fprintf (stderr, \
183 "\n%s: %d: error in %s: it is not a loop node\n", \
184 __FILE__, __LINE__, __FUNCTION__); \
185 gcc_unreachable (); \
186 } \
187 _node; }))
188 #else
189 #define IRA_LOOP_NODE_BY_INDEX(index) (&ira_loop_nodes[index])
190 #endif
192 #define IRA_LOOP_NODE(loop) IRA_LOOP_NODE_BY_INDEX ((loop)->num)
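/* Usage sketch (illustrative only): the two macro families above map CFG
   entities onto the IRA region tree, e.g. for a basic block BB

     ira_loop_tree_node_t bb_node = IRA_BB_NODE (bb);
     ira_loop_tree_node_t enclosing_loop_node = bb_node->parent;

   where the parent of a block node is the node of the innermost loop (or
   of the whole function) containing BB.  */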
195 /* The structure describes program points where a given allocno lives.
196 To save memory we store allocno conflicts only for allocnos of the
197 same cover class, which is enough to assign hard registers.  To find
198 conflicts for other allocnos (e.g. to assign stack memory slots) we
199 use the live ranges.  If the live ranges of two allocnos
200 intersect, the allocnos are in conflict. */
201 struct live_range
202 {
203 /* Object whose live range is described by given structure. */
204 ira_object_t object;
205 /* Program point range. */
206 int start, finish;
207 /* Next structure describing program points where the allocno
208 lives. */
209 live_range_t next;
210 /* Pointer to structures with the same start/finish. */
211 live_range_t start_next, finish_next;
212 };
214 /* Program points are enumerated by numbers from range
215 0..IRA_MAX_POINT-1. There are approximately two times more program
216 points than insns. Program points are places in the program where
217 liveness info can be changed.  In the most general case (there are more
218 complicated cases too) some program points correspond to places
219 where an input operand dies and other ones correspond to places where
220 output operands are born. */
221 extern int ira_max_point;
223 /* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
224 live ranges with given start/finish point. */
225 extern live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
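/* Usage sketch (illustrative only): all ranges starting at a program point
   P can be visited through the start chain (see the start_next member of
   struct live_range above); note_range is a hypothetical callback:

     live_range_t r;
     for (r = ira_start_point_ranges[p]; r != NULL; r = r->start_next)
       note_range (r->object, r->start, r->finish);  */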
227 /* A structure representing conflict information for an allocno
228 (or one of its subwords). */
229 struct ira_object
230 {
231 /* The allocno associated with this record. */
232 ira_allocno_t allocno;
233 /* Vector of accumulated conflicting ira_objects with NULL end
234 marker (if OBJECT_CONFLICT_VEC_P is true) or conflict bit vector
235 otherwise.  Only ira_objects belonging to allocnos with the
236 same cover class are in the vector or in the bit vector. */
237 void *conflicts_array;
238 /* Pointer to structures describing at what program point the
239 object lives.  We always maintain the list so that *the ranges in
240 the list do not intersect and are ordered by decreasing program
241 points*. */
242 live_range_t live_ranges;
243 /* The subword within ALLOCNO which is represented by this object.
244 Zero means the lowest-order subword (or the entire allocno in case
245 it is not being tracked in subwords). */
246 int subword;
247 /* Allocated size of the conflicts array. */
248 unsigned int conflicts_array_size;
249 /* A unique number for every instance of this structure, which is used
250 to represent it in conflict bit vectors. */
251 int id;
252 /* Before building conflicts, MIN and MAX are initialized to
253 correspondingly minimal and maximal points of the accumulated
254 live ranges. Afterwards, they hold the minimal and maximal ids
255 of other ira_objects that this one can conflict with. */
256 int min, max;
257 /* Initial and accumulated hard registers conflicting with this
258 object and as a consequence cannot be assigned to the allocno.
259 All non-allocatable hard regs and hard regs of cover classes
260 different from the given allocno's one are included in the sets. */
261 HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
262 /* Number of accumulated conflicts in the vector of conflicting
263 objects. */
264 int num_accumulated_conflicts;
265 /* TRUE if conflicts are represented by a vector of pointers to
266 ira_object structures. Otherwise, we use a bit vector indexed
267 by conflict ID numbers. */
268 unsigned int conflict_vec_p : 1;
269 };
271 /* A structure representing an allocno (allocation entity).  An allocno
272 represents a pseudo-register in an allocation region.  If a
273 pseudo-register does not live in a region but lives in the
274 nested regions, it is represented in the region by a special allocno
275 called a *cap*.  There may be more than one cap representing the same
276 pseudo-register in a region.  It means that the corresponding
277 pseudo-register lives in more than one non-intersecting subregion. */
278 struct ira_allocno
279 {
280 /* The allocno order number starting with 0.  Each allocno has a
281 unique number and the number is never changed for the
282 allocno. */
283 int num;
284 /* Regno for allocno or cap. */
285 int regno;
286 /* Mode of the allocno which is the mode of the corresponding
287 pseudo-register. */
288 enum machine_mode mode;
289 /* Hard register assigned to given allocno. Negative value means
290 that memory was allocated to the allocno. During the reload,
291 spilled allocno has value equal to the corresponding stack slot
292 number (0, ...) - 2. Value -1 is used for allocnos spilled by the
293 reload (at this point pseudo-register has only one allocno) which
294 did not get stack slot yet. */
295 int hard_regno;
296 /* Final rtx representation of the allocno. */
297 rtx reg;
298 /* Allocnos with the same regno are linked by the following member.
299 Allocnos corresponding to inner loops are first in the list (it
300 corresponds to depth-first traverse of the loops). */
301 ira_allocno_t next_regno_allocno;
302 /* There may be different allocnos with the same regno in different
303 regions. Allocnos are bound to the corresponding loop tree node.
304 Pseudo-register may have only one regular allocno with given loop
305 tree node but more than one cap (see comments above). */
306 ira_loop_tree_node_t loop_tree_node;
307 /* Accumulated usage references of the allocno. Here and below,
308 word 'accumulated' means info for given region and all nested
309 subregions. In this case, 'accumulated' means sum of references
310 of the corresponding pseudo-register in this region and in all
311 nested subregions recursively. */
312 int nrefs;
313 /* Accumulated frequency of usage of the allocno. */
314 int freq;
315 /* Register class which should be used for allocation for given
316 allocno. NO_REGS means that we should use memory. */
317 enum reg_class cover_class;
318 /* Minimal accumulated and updated costs of usage register of the
319 cover class for the allocno. */
320 int cover_class_cost, updated_cover_class_cost;
321 /* Minimal accumulated, and updated costs of memory for the allocno.
322 At the allocation start, the original and updated costs are
323 equal. The updated cost may be changed after finishing
324 allocation in a region and starting allocation in a subregion.
325 The change reflects the cost of spill/restore code on the
326 subregion border if we assign memory to the pseudo in the
327 subregion. */
328 int memory_cost, updated_memory_cost;
329 /* Accumulated number of points where the allocno lives and there is
330 excess pressure for its class. Excess pressure for a register
331 class at some point means that there are more allocnos of given
332 register class living at the point than number of hard-registers
333 of the class available for the allocation. */
334 int excess_pressure_points_num;
335 /* Copies to other non-conflicting allocnos. The copies can
336 represent move insn or potential move insn usually because of two
337 operand insn constraints. */
338 ira_copy_t allocno_copies;
339 /* It is an allocno (cap) representing the given allocno on the upper
340 loop tree level. */
341 ira_allocno_t cap;
342 /* It is a link to allocno (cap) on lower loop level represented by
343 given cap. Null if given allocno is not a cap. */
344 ira_allocno_t cap_member;
345 /* Coalesced allocnos form a cyclic list. One allocno given by
346 FIRST_COALESCED_ALLOCNO represents all coalesced allocnos. The
347 list is chained by NEXT_COALESCED_ALLOCNO. */
348 ira_allocno_t first_coalesced_allocno;
349 ira_allocno_t next_coalesced_allocno;
350 /* The number of objects tracked in the following array. */
351 int num_objects;
352 /* An array of structures describing conflict information and live
353 ranges for each object associated with the allocno. There may be
354 more than one such object in cases where the allocno represents a
355 multi-word register. */
356 ira_object_t objects[2];
357 /* Accumulated frequency of calls which given allocno
358 intersects. */
359 int call_freq;
360 /* Accumulated number of the intersected calls. */
361 int calls_crossed_num;
362 /* TRUE if the allocno assigned to memory was a destination of
363 removed move (see ira-emit.c) at loop exit because the value of
364 the corresponding pseudo-register is not changed inside the
365 loop. */
366 unsigned int mem_optimized_dest_p : 1;
367 /* TRUE if the corresponding pseudo-register has disjoint live
368 ranges and the other allocnos of the pseudo-register except this
369 one changed REG. */
370 unsigned int somewhere_renamed_p : 1;
371 /* TRUE if allocno with the same REGNO in a subregion has been
372 renamed, in other words, got a new pseudo-register. */
373 unsigned int child_renamed_p : 1;
374 /* During the reload, value TRUE means that we should not reassign a
375 hard register to the allocno that got memory earlier.  It is set up
376 when we remove memory-memory move insns before each iteration of
377 the reload. */
378 unsigned int dont_reassign_p : 1;
379 #ifdef STACK_REGS
380 /* Set to TRUE if allocno can't be assigned to the stack hard
381 register correspondingly in this region and area including the
382 region and all its subregions recursively. */
383 unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
384 #endif
385 /* TRUE value means that there is no sense to spill the allocno
386 during coloring because the spill will result in additional
387 reloads in reload pass. */
388 unsigned int bad_spill_p : 1;
389 /* TRUE value means that the allocno was not removed yet from the
390 conflict graph during coloring. */
391 unsigned int in_graph_p : 1;
392 /* TRUE if a hard register or memory has been assigned to the
393 allocno. */
394 unsigned int assigned_p : 1;
395 /* TRUE if it is put on the stack to make other allocnos
396 colorable. */
397 unsigned int may_be_spilled_p : 1;
398 /* TRUE if the allocno was removed from the splay tree used to
399 choose allocnos for spilling (see ira-color.c). */
400 unsigned int splay_removed_p : 1;
401 /* Non NULL if we remove restoring value from given allocno to
402 MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
403 allocno value is not changed inside the loop. */
404 ira_allocno_t mem_optimized_dest;
405 /* Array of usage costs (accumulated and the one updated during
406 coloring) for each hard register of the allocno cover class. The
407 member value can be NULL if all costs are the same and equal to
408 COVER_CLASS_COST. For example, the costs of two different hard
409 registers can be different if one hard register is callee-saved
410 and another one is callee-used and the allocno lives through
411 calls. Another example can be case when for some insn the
412 corresponding pseudo-register value should be put in specific
413 register class (e.g. AREG for x86) which is a strict subset of
414 the allocno cover class (GENERAL_REGS for x86). We have updated
415 costs to reflect the situation when the usage cost of a hard
416 register is decreased because the allocno is connected to another
417 allocno by a copy and that other allocno has been assigned to
418 the hard register. */
419 int *hard_reg_costs, *updated_hard_reg_costs;
420 /* Array of decreasing costs (accumulated and the one updated during
421 coloring) for allocnos conflicting with given allocno for hard
422 regno of the allocno cover class. The member value can be NULL
423 if all costs are the same. These costs are used to reflect
424 preferences of other allocnos not assigned yet during assigning
425 to given allocno. */
426 int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
427 /* Size (in hard registers) of the same cover class allocnos with
428 TRUE in_graph_p value and conflicting with given allocno during
429 each point of graph coloring. */
430 int left_conflicts_size;
431 /* Number of hard registers of the allocno cover class really
432 available for the allocno allocation. */
433 int available_regs_num;
434 /* Allocnos in a bucket (used in coloring) chained by the following
435 two members. */
436 ira_allocno_t next_bucket_allocno;
437 ira_allocno_t prev_bucket_allocno;
438 /* Used for temporary purposes. */
439 int temp;
440 };
442 /* All members of the allocno structures should be accessed only
443 through the following macros. */
444 #define ALLOCNO_NUM(A) ((A)->num)
445 #define ALLOCNO_REGNO(A) ((A)->regno)
446 #define ALLOCNO_REG(A) ((A)->reg)
447 #define ALLOCNO_NEXT_REGNO_ALLOCNO(A) ((A)->next_regno_allocno)
448 #define ALLOCNO_LOOP_TREE_NODE(A) ((A)->loop_tree_node)
449 #define ALLOCNO_CAP(A) ((A)->cap)
450 #define ALLOCNO_CAP_MEMBER(A) ((A)->cap_member)
451 #define ALLOCNO_NREFS(A) ((A)->nrefs)
452 #define ALLOCNO_FREQ(A) ((A)->freq)
453 #define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
454 #define ALLOCNO_CALL_FREQ(A) ((A)->call_freq)
455 #define ALLOCNO_CALLS_CROSSED_NUM(A) ((A)->calls_crossed_num)
456 #define ALLOCNO_MEM_OPTIMIZED_DEST(A) ((A)->mem_optimized_dest)
457 #define ALLOCNO_MEM_OPTIMIZED_DEST_P(A) ((A)->mem_optimized_dest_p)
458 #define ALLOCNO_SOMEWHERE_RENAMED_P(A) ((A)->somewhere_renamed_p)
459 #define ALLOCNO_CHILD_RENAMED_P(A) ((A)->child_renamed_p)
460 #define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
461 #ifdef STACK_REGS
462 #define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
463 #define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
464 #endif
465 #define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
466 #define ALLOCNO_IN_GRAPH_P(A) ((A)->in_graph_p)
467 #define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
468 #define ALLOCNO_MAY_BE_SPILLED_P(A) ((A)->may_be_spilled_p)
469 #define ALLOCNO_SPLAY_REMOVED_P(A) ((A)->splay_removed_p)
470 #define ALLOCNO_MODE(A) ((A)->mode)
471 #define ALLOCNO_COPIES(A) ((A)->allocno_copies)
472 #define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
473 #define ALLOCNO_UPDATED_HARD_REG_COSTS(A) ((A)->updated_hard_reg_costs)
474 #define ALLOCNO_CONFLICT_HARD_REG_COSTS(A) \
475 ((A)->conflict_hard_reg_costs)
476 #define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
477 ((A)->updated_conflict_hard_reg_costs)
478 #define ALLOCNO_LEFT_CONFLICTS_SIZE(A) ((A)->left_conflicts_size)
479 #define ALLOCNO_COVER_CLASS(A) ((A)->cover_class)
480 #define ALLOCNO_COVER_CLASS_COST(A) ((A)->cover_class_cost)
481 #define ALLOCNO_UPDATED_COVER_CLASS_COST(A) ((A)->updated_cover_class_cost)
482 #define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
483 #define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
484 #define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) ((A)->excess_pressure_points_num)
485 #define ALLOCNO_AVAILABLE_REGS_NUM(A) ((A)->available_regs_num)
486 #define ALLOCNO_NEXT_BUCKET_ALLOCNO(A) ((A)->next_bucket_allocno)
487 #define ALLOCNO_PREV_BUCKET_ALLOCNO(A) ((A)->prev_bucket_allocno)
488 #define ALLOCNO_TEMP(A) ((A)->temp)
489 #define ALLOCNO_FIRST_COALESCED_ALLOCNO(A) ((A)->first_coalesced_allocno)
490 #define ALLOCNO_NEXT_COALESCED_ALLOCNO(A) ((A)->next_coalesced_allocno)
491 #define ALLOCNO_OBJECT(A,N) ((A)->objects[N])
492 #define ALLOCNO_NUM_OBJECTS(A) ((A)->num_objects)
494 #define OBJECT_ALLOCNO(C) ((C)->allocno)
495 #define OBJECT_SUBWORD(C) ((C)->subword)
496 #define OBJECT_CONFLICT_ARRAY(C) ((C)->conflicts_array)
497 #define OBJECT_CONFLICT_VEC(C) ((ira_object_t *)(C)->conflicts_array)
498 #define OBJECT_CONFLICT_BITVEC(C) ((IRA_INT_TYPE *)(C)->conflicts_array)
499 #define OBJECT_CONFLICT_ARRAY_SIZE(C) ((C)->conflicts_array_size)
500 #define OBJECT_CONFLICT_VEC_P(C) ((C)->conflict_vec_p)
501 #define OBJECT_NUM_CONFLICTS(C) ((C)->num_accumulated_conflicts)
502 #define OBJECT_CONFLICT_HARD_REGS(C) ((C)->conflict_hard_regs)
503 #define OBJECT_TOTAL_CONFLICT_HARD_REGS(C) ((C)->total_conflict_hard_regs)
504 #define OBJECT_MIN(C) ((C)->min)
505 #define OBJECT_MAX(C) ((C)->max)
506 #define OBJECT_CONFLICT_ID(C) ((C)->id)
507 #define OBJECT_LIVE_RANGES(A) ((A)->live_ranges)
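/* Usage sketch (illustrative only): all allocno and object fields are read
   and written through the macros above, e.g. dumping the final disposition
   of allocno A (a negative hard register means memory was assigned):

     fprintf (stderr, "a%dr%d -> %d\n", ALLOCNO_NUM (A),
              ALLOCNO_REGNO (A), ALLOCNO_HARD_REGNO (A));  */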
509 /* Map regno -> allocnos with given regno (see comments for
510 allocno member `next_regno_allocno'). */
511 extern ira_allocno_t *ira_regno_allocno_map;
513 /* Array of references to all allocnos. The order number of the
514 allocno corresponds to the index in the array. Removed allocnos
515 have NULL element value. */
516 extern ira_allocno_t *ira_allocnos;
518 /* The size of the previous array. */
519 extern int ira_allocnos_num;
521 /* Map a conflict id to its corresponding ira_object structure. */
522 extern ira_object_t *ira_object_id_map;
524 /* The size of the previous array. */
525 extern int ira_objects_num;
527 /* The following structure represents a copy of two allocnos.  The
528 copies represent move insns or potential move insns, usually because
529 of two-operand insn constraints.  To remove register shuffle, we
530 also create copies between an allocno which is the output of an insn
531 and an allocno becoming dead in the insn. */
532 struct ira_allocno_copy
533 {
534 /* The unique order number of the copy node starting with 0. */
535 int num;
536 /* Allocnos connected by the copy. The first allocno should have
537 smaller order number than the second one. */
538 ira_allocno_t first, second;
539 /* Execution frequency of the copy. */
540 int freq;
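/* Presumably true when the copy is originated from insn operand
   constraints rather than from an actual move insn; see the comment
   for the INSN member below.  */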
541 bool constraint_p;
542 /* It is a move insn which is the origin of the copy.  The member
543 value for a copy representing two-operand insn constraints or
544 for a copy created to remove register shuffle is NULL.  In the last
545 case the copy frequency is smaller than the corresponding insn
546 execution frequency. */
547 rtx insn;
548 /* All copies with the same allocno as FIRST are linked by the two
549 following members. */
550 ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
551 /* All copies with the same allocno as SECOND are linked by the two
552 following members. */
553 ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
554 /* Region from which given copy is originated. */
555 ira_loop_tree_node_t loop_tree_node;
556 };
558 /* Array of references to all copies. The order number of the copy
559 corresponds to the index in the array. Removed copies have NULL
560 element value. */
561 extern ira_copy_t *ira_copies;
563 /* Size of the previous array. */
564 extern int ira_copies_num;
566 /* The following structure describes a stack slot used for spilled
567 pseudo-registers. */
568 struct ira_spilled_reg_stack_slot
569 {
570 /* Pseudo-registers assigned to the stack slot. */
571 bitmap_head spilled_regs;
572 /* RTL representation of the stack slot. */
573 rtx mem;
574 /* Size of the stack slot. */
575 unsigned int width;
576 };
578 /* The number of elements in the following array. */
579 extern int ira_spilled_reg_stack_slots_num;
581 /* The following array contains info about spilled pseudo-registers
582 stack slots used in current function so far. */
583 extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
585 /* Correspondingly overall cost of the allocation, cost of the
586 allocnos assigned to hard-registers, cost of the allocnos assigned
587 to memory, cost of loads, stores and register move insns generated
588 for pseudo-register live range splitting (see ira-emit.c). */
589 extern int ira_overall_cost;
590 extern int ira_reg_cost, ira_mem_cost;
591 extern int ira_load_cost, ira_store_cost, ira_shuffle_cost;
592 extern int ira_move_loops_num, ira_additional_jumps_num;
594 /* This page contains a bitset implementation called 'min/max sets' used to
595 record conflicts in IRA.
596 They are named min/max sets since we keep track of a minimum and a maximum
597 bit number for each set representing the bounds of valid elements. Otherwise,
598 the implementation resembles sbitmaps in that we store an array of integers
599 whose bits directly represent the members of the set. */
601 /* The type used as elements in the array, and the number of bits in
602 this type. */
604 #define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
605 #define IRA_INT_TYPE HOST_WIDE_INT
607 /* Set, clear or test bit number I in R, a bit vector of elements with
608 minimal index and maximal index equal correspondingly to MIN and
609 MAX. */
610 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
612 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
613 (({ int _min = (MIN), _max = (MAX), _i = (I); \
614 if (_i < _min || _i > _max) \
615 { \
616 fprintf (stderr, \
617 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
618 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
619 gcc_unreachable (); \
620 } \
621 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
622 |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
625 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
626 (({ int _min = (MIN), _max = (MAX), _i = (I); \
627 if (_i < _min || _i > _max) \
628 { \
629 fprintf (stderr, \
630 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
631 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
632 gcc_unreachable (); \
633 } \
634 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
635 &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
637 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
638 (({ int _min = (MIN), _max = (MAX), _i = (I); \
639 if (_i < _min || _i > _max) \
640 { \
641 fprintf (stderr, \
642 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
643 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
644 gcc_unreachable (); \
645 } \
646 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
647 & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
649 #else
651 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) \
652 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
653 |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
655 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) \
656 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
657 &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
659 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) \
660 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
661 & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
663 #endif
665 /* The iterator for min/max sets. */
666 typedef struct {
668 /* Array containing the bit vector. */
669 IRA_INT_TYPE *vec;
671 /* The number of the current element in the vector. */
672 unsigned int word_num;
674 /* The number of bits in the bit vector. */
675 unsigned int nel;
677 /* The current bit index of the bit vector. */
678 unsigned int bit_num;
680 /* Index corresponding to the 1st bit of the bit vector. */
681 int start_val;
683 /* The word of the bit vector currently visited. */
684 unsigned IRA_INT_TYPE word;
685 } minmax_set_iterator;
687 /* Initialize the iterator I for bit vector VEC containing minimal and
688 maximal values MIN and MAX. */
689 static inline void
690 minmax_set_iter_init (minmax_set_iterator *i, IRA_INT_TYPE *vec, int min,
691 int max)
692 {
693 i->vec = vec;
694 i->word_num = 0;
695 i->nel = max < min ? 0 : max - min + 1;
696 i->start_val = min;
697 i->bit_num = 0;
698 i->word = i->nel == 0 ? 0 : vec[0];
699 }
701 /* Return TRUE if we have more elements of the set to visit, in which
702 case *N is set to the number of the element to be visited.  Otherwise,
703 return FALSE. */
704 static inline bool
705 minmax_set_iter_cond (minmax_set_iterator *i, int *n)
706 {
707 /* Skip words that are zeros. */
708 for (; i->word == 0; i->word = i->vec[i->word_num])
709 {
710 i->word_num++;
711 i->bit_num = i->word_num * IRA_INT_BITS;
713 /* If we have reached the end, break. */
714 if (i->bit_num >= i->nel)
715 return false;
716 }
718 /* Skip bits that are zero. */
719 for (; (i->word & 1) == 0; i->word >>= 1)
720 i->bit_num++;
722 *n = (int) i->bit_num + i->start_val;
724 return true;
725 }
727 /* Advance to the next element in the set. */
728 static inline void
729 minmax_set_iter_next (minmax_set_iterator *i)
730 {
731 i->word >>= 1;
732 i->bit_num++;
733 }
735 /* Loop over all elements of a min/max set given by bit vector VEC and
736 their minimal and maximal values MIN and MAX.  In each iteration, N
737 is set to the number of the next element.  ITER is an instance of
738 minmax_set_iterator used to iterate over the set. */
739 #define FOR_EACH_BIT_IN_MINMAX_SET(VEC, MIN, MAX, N, ITER) \
740 for (minmax_set_iter_init (&(ITER), (VEC), (MIN), (MAX)); \
741 minmax_set_iter_cond (&(ITER), &(N)); \
742 minmax_set_iter_next (&(ITER)))
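/* Usage sketch (illustrative only): walking the conflict bit vector of an
   object OBJ when the conflicts are not kept as a pointer vector;
   handle_conflict is a hypothetical callback:

     int id;
     minmax_set_iterator iter;
     if (! OBJECT_CONFLICT_VEC_P (OBJ))
       FOR_EACH_BIT_IN_MINMAX_SET (OBJECT_CONFLICT_BITVEC (OBJ),
                                   OBJECT_MIN (OBJ), OBJECT_MAX (OBJ),
                                   id, iter)
         handle_conflict (ira_object_id_map[id]);  */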
744 struct target_ira_int {
745 /* Initialized once. It is a maximal possible size of the allocated
746 struct costs. */
747 int x_max_struct_costs_size;
749 /* Allocated and initialized once, and used to initialize cost values
750 for each insn. */
751 struct costs *x_init_cost;
753 /* Allocated once, and used for temporary purposes. */
754 struct costs *x_temp_costs;
756 /* Allocated once, and used for the cost calculation. */
757 struct costs *x_op_costs[MAX_RECOG_OPERANDS];
758 struct costs *x_this_op_costs[MAX_RECOG_OPERANDS];
760 /* Classes used for cost calculation. They may be different on
761 different iterations of the cost calculations or in different
762 optimization modes. */
763 enum reg_class *x_cost_classes;
765 /* Hard registers that can not be used for the register allocator for
766 all functions of the current compilation unit. */
767 HARD_REG_SET x_no_unit_alloc_regs;
769 /* Map: hard regs X modes -> set of hard registers for storing value
770 of given mode starting with given hard register. */
771 HARD_REG_SET (x_ira_reg_mode_hard_regset
772 [FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES]);
774 /* Array based on TARGET_REGISTER_MOVE_COST.  Don't use
775 ira_register_move_cost directly.  Use ira_get_register_move_cost
776 instead. */
777 move_table *x_ira_register_move_cost[MAX_MACHINE_MODE];
779 /* Similar to may_move_in_cost but it is calculated in IRA instead of
780 regclass.  Another difference is that we take only available hard
781 registers into account to figure out whether one register class is
782 a subset of another one.  Don't use it directly.  Use
783 ira_get_may_move_cost instead. */
784 move_table *x_ira_may_move_in_cost[MAX_MACHINE_MODE];
786 /* Similar to may_move_out_cost but it is calculated in IRA instead of
787 regclass.  Another difference is that we take only available hard
788 registers into account to figure out whether one register class is
789 a subset of another one.  Don't use it directly.  Use
790 ira_get_may_move_cost instead. */
791 move_table *x_ira_may_move_out_cost[MAX_MACHINE_MODE];
793 /* Register class subset relation: TRUE if the first class is a subset
794 of the second one considering only hard registers available for the
795 allocation. */
796 int x_ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
798 /* Array of the hard registers of given class which are available
799 for allocation.  The order is defined by the hard register
800 numbers. */
801 short x_ira_non_ordered_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
803 /* Index (in ira_class_hard_regs) for given register class and hard
804 register (in general case a hard register can belong to several
805 register classes).  The index is negative for hard registers
806 unavailable for the allocation. */
807 short x_ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
809 /* Array whose values are hard regsets of the hard registers available
810 for the allocation of given register class but whose HARD_REGNO_MODE_OK
811 values for given mode are zero (i.e. they cannot hold the mode). */
812 HARD_REG_SET x_prohibited_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
814 /* The value is number of elements in the subsequent array. */
815 int x_ira_important_classes_num;
817 /* The array containing non-empty classes (including non-empty cover
818 classes) which are subclasses of cover classes.  Such classes are
819 important for calculation of the hard register usage costs. */
820 enum reg_class x_ira_important_classes[N_REG_CLASSES];
822 /* The biggest important class inside of intersection of the two
823 classes (that is calculated taking only hard registers available
824 for allocation into account).  If both classes contain no hard
825 registers available for allocation, the value is calculated
826 taking all hard registers including fixed ones into account. */
827 enum reg_class x_ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
829 /* True if the two classes (that is calculated taking only hard
830 registers available for allocation into account) are
831 intersected. */
832 bool x_ira_reg_classes_intersect_p[N_REG_CLASSES][N_REG_CLASSES];
834 /* Classes with end marker LIM_REG_CLASSES which are intersected with
835 given class (the first index).  That includes given class itself.
836 This is calculated taking only hard registers available for
837 allocation into account. */
838 enum reg_class x_ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];
840 /* The biggest important class inside of union of the two classes
841 (that is calculated taking only hard registers available for
842 allocation into account).  If both classes contain no hard
843 registers available for allocation, the value is calculated
844 taking all hard registers including fixed ones into account.  In
845 other words, the value is the corresponding reg_class_subunion
846 value. */
847 enum reg_class x_ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
849 /* For each reg class, table listing all the classes contained in it
850 (excluding the class itself.  Non-allocatable registers are
851 excluded from the consideration). */
852 enum reg_class x_alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
854 /* Array whose values are hard regset of hard registers for which
855 move of the hard register in given mode into itself is
856 prohibited. */
857 HARD_REG_SET x_ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
859 /* Flag indicating that the above array has been initialized. */
860 bool x_ira_prohibited_mode_move_regs_initialized_p;
861 };
863 extern struct target_ira_int default_target_ira_int;
864 #if SWITCHABLE_TARGET
865 extern struct target_ira_int *this_target_ira_int;
866 #else
867 #define this_target_ira_int (&default_target_ira_int)
868 #endif
870 #define ira_reg_mode_hard_regset \
871 (this_target_ira_int->x_ira_reg_mode_hard_regset)
872 #define ira_register_move_cost \
873 (this_target_ira_int->x_ira_register_move_cost)
874 #define ira_may_move_in_cost \
875 (this_target_ira_int->x_ira_may_move_in_cost)
876 #define ira_may_move_out_cost \
877 (this_target_ira_int->x_ira_may_move_out_cost)
878 #define ira_class_subset_p \
879 (this_target_ira_int->x_ira_class_subset_p)
880 #define ira_non_ordered_class_hard_regs \
881 (this_target_ira_int->x_ira_non_ordered_class_hard_regs)
882 #define ira_class_hard_reg_index \
883 (this_target_ira_int->x_ira_class_hard_reg_index)
884 #define prohibited_class_mode_regs \
885 (this_target_ira_int->x_prohibited_class_mode_regs)
886 #define ira_important_classes_num \
887 (this_target_ira_int->x_ira_important_classes_num)
888 #define ira_important_classes \
889 (this_target_ira_int->x_ira_important_classes)
890 #define ira_reg_class_intersect \
891 (this_target_ira_int->x_ira_reg_class_intersect)
892 #define ira_reg_classes_intersect_p \
893 (this_target_ira_int->x_ira_reg_classes_intersect_p)
894 #define ira_reg_class_super_classes \
895 (this_target_ira_int->x_ira_reg_class_super_classes)
896 #define ira_reg_class_union \
897 (this_target_ira_int->x_ira_reg_class_union)
898 #define ira_prohibited_mode_move_regs \
899 (this_target_ira_int->x_ira_prohibited_mode_move_regs)
901 /* ira.c: */
903 extern void *ira_allocate (size_t);
904 extern void *ira_reallocate (void *, size_t);
905 extern void ira_free (void *addr);
906 extern bitmap ira_allocate_bitmap (void);
907 extern void ira_free_bitmap (bitmap);
908 extern void ira_print_disposition (FILE *);
909 extern void ira_debug_disposition (void);
910 extern void ira_debug_class_cover (void);
911 extern void ira_init_register_move_cost (enum machine_mode);
913 /* The length of the two following arrays. */
914 extern int ira_reg_equiv_len;
916 /* The element value is TRUE if the corresponding regno value is
917 invariant. */
918 extern bool *ira_reg_equiv_invariant_p;
920 /* The element value is equiv constant of given pseudo-register or
921 NULL_RTX. */
922 extern rtx *ira_reg_equiv_const;
924 /* ira-build.c */
926 /* The current loop tree node and its regno allocno map. */
927 extern ira_loop_tree_node_t ira_curr_loop_tree_node;
928 extern ira_allocno_t *ira_curr_regno_allocno_map;
930 extern void ira_debug_copy (ira_copy_t);
931 extern void ira_debug_copies (void);
932 extern void ira_debug_allocno_copies (ira_allocno_t);
934 extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
935 void (*) (ira_loop_tree_node_t),
936 void (*) (ira_loop_tree_node_t));
937 extern ira_allocno_t ira_parent_allocno (ira_allocno_t);
938 extern ira_allocno_t ira_parent_or_cap_allocno (ira_allocno_t);
939 extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
940 extern void ira_create_allocno_objects (ira_allocno_t);
941 extern void ira_set_allocno_cover_class (ira_allocno_t, enum reg_class);
942 extern bool ira_conflict_vector_profitable_p (ira_object_t, int);
943 extern void ira_allocate_conflict_vec (ira_object_t, int);
944 extern void ira_allocate_object_conflicts (ira_object_t, int);
945 extern void ior_hard_reg_conflicts (ira_allocno_t, HARD_REG_SET *);
946 extern void ira_print_expanded_allocno (ira_allocno_t);
947 extern void ira_add_live_range_to_object (ira_object_t, int, int);
948 extern live_range_t ira_create_live_range (ira_object_t, int, int,
949 live_range_t);
950 extern live_range_t ira_copy_live_range_list (live_range_t);
951 extern live_range_t ira_merge_live_ranges (live_range_t, live_range_t);
952 extern bool ira_live_ranges_intersect_p (live_range_t, live_range_t);
953 extern void ira_finish_live_range (live_range_t);
954 extern void ira_finish_live_range_list (live_range_t);
955 extern void ira_free_allocno_updated_costs (ira_allocno_t);
956 extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
957 int, bool, rtx, ira_loop_tree_node_t);
958 extern void ira_add_allocno_copy_to_list (ira_copy_t);
959 extern void ira_swap_allocno_copy_ends_if_necessary (ira_copy_t);
960 extern void ira_remove_allocno_copy_from_list (ira_copy_t);
961 extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int,
962 bool, rtx, ira_loop_tree_node_t);
964 extern int *ira_allocate_cost_vector (enum reg_class);
965 extern void ira_free_cost_vector (int *, enum reg_class);
967 extern void ira_flattening (int, int);
968 extern bool ira_build (bool);
969 extern void ira_destroy (void);
971 /* ira-costs.c */
972 extern void ira_init_costs_once (void);
973 extern void ira_init_costs (void);
974 extern void ira_finish_costs_once (void);
975 extern void ira_costs (void);
976 extern void ira_tune_allocno_costs_and_cover_classes (void);
978 /* ira-lives.c */
980 extern void ira_rebuild_start_finish_chains (void);
981 extern void ira_print_live_range_list (FILE *, live_range_t);
982 extern void ira_debug_live_range_list (live_range_t);
983 extern void ira_debug_allocno_live_ranges (ira_allocno_t);
984 extern void ira_debug_live_ranges (void);
985 extern void ira_create_allocno_live_ranges (void);
986 extern void ira_compress_allocno_live_ranges (void);
987 extern void ira_finish_allocno_live_ranges (void);
989 /* ira-conflicts.c */
990 extern void ira_debug_conflicts (bool);
991 extern void ira_build_conflicts (void);
993 /* ira-color.c */
994 extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
995 extern void ira_reassign_conflict_allocnos (int);
996 extern void ira_initiate_assign (void);
997 extern void ira_finish_assign (void);
998 extern void ira_color (void);
1000 /* ira-emit.c */
1001 extern void ira_emit (bool);
1005 /* Return cost of moving value of MODE from register of class FROM to
1006 register of class TO. */
1007 static inline int
1008 ira_get_register_move_cost (enum machine_mode mode,
1009 enum reg_class from, enum reg_class to)
1010 {
1011 if (ira_register_move_cost[mode] == NULL)
1012 ira_init_register_move_cost (mode);
1013 return ira_register_move_cost[mode][from][to];
1014 }
1016 /* Return cost of moving value of MODE from register of class FROM to
1017 register of class TO. Return zero if IN_P is true and FROM is
1018 subset of TO or if IN_P is false and FROM is superset of TO. */
1019 static inline int
1020 ira_get_may_move_cost (enum machine_mode mode,
1021 enum reg_class from, enum reg_class to,
1022 bool in_p)
1023 {
1024 if (ira_register_move_cost[mode] == NULL)
1025 ira_init_register_move_cost (mode);
1026 return (in_p
1027 ? ira_may_move_in_cost[mode][from][to]
1028 : ira_may_move_out_cost[mode][from][to]);
1029 }
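/* Usage sketch (illustrative only): a typical query, letting the cost
   tables be initialized lazily on first use; SImode and GENERAL_REGS are
   just placeholders for a real mode and cover class:

     int cost = ira_get_register_move_cost (SImode, GENERAL_REGS,
                                            GENERAL_REGS);  */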
1033 /* The iterator for all allocnos. */
1034 typedef struct {
1035 /* The number of the current element in IRA_ALLOCNOS. */
1036 int n;
1037 } ira_allocno_iterator;
1039 /* Initialize the iterator I. */
1040 static inline void
1041 ira_allocno_iter_init (ira_allocno_iterator *i)
1042 {
1043 i->n = 0;
1044 }
1046 /* Return TRUE if we have more allocnos to visit, in which case *A is
1047 set to the allocno to be visited. Otherwise, return FALSE. */
1048 static inline bool
1049 ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
1050 {
1051 int n;
1053 for (n = i->n; n < ira_allocnos_num; n++)
1054 if (ira_allocnos[n] != NULL)
1055 {
1056 *a = ira_allocnos[n];
1057 i->n = n + 1;
1058 return true;
1059 }
1060 return false;
1061 }
1063 /* Loop over all allocnos. In each iteration, A is set to the next
1064 allocno. ITER is an instance of ira_allocno_iterator used to iterate
1065 the allocnos. */
1066 #define FOR_EACH_ALLOCNO(A, ITER) \
1067 for (ira_allocno_iter_init (&(ITER)); \
1068 ira_allocno_iter_cond (&(ITER), &(A));)
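/* Usage sketch (illustrative only): counting the allocnos that ended up in
   memory after coloring:

     ira_allocno_t a;
     ira_allocno_iterator ai;
     int spilled = 0;
     FOR_EACH_ALLOCNO (a, ai)
       if (ALLOCNO_HARD_REGNO (a) < 0)
         spilled++;  */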
1070 /* The iterator for all objects. */
1071 typedef struct {
1072 /* The number of the current element in ira_object_id_map. */
1073 int n;
1074 } ira_object_iterator;
1076 /* Initialize the iterator I. */
1077 static inline void
1078 ira_object_iter_init (ira_object_iterator *i)
1079 {
1080 i->n = 0;
1081 }
1083 /* Return TRUE if we have more objects to visit, in which case *OBJ is
1084 set to the object to be visited. Otherwise, return FALSE. */
1085 static inline bool
1086 ira_object_iter_cond (ira_object_iterator *i, ira_object_t *obj)
1087 {
1088 int n;
1090 for (n = i->n; n < ira_objects_num; n++)
1091 if (ira_object_id_map[n] != NULL)
1092 {
1093 *obj = ira_object_id_map[n];
1094 i->n = n + 1;
1095 return true;
1096 }
1097 return false;
1098 }
1100 /* Loop over all objects. In each iteration, OBJ is set to the next
1101 object. ITER is an instance of ira_object_iterator used to iterate
1102 the objects. */
1103 #define FOR_EACH_OBJECT(OBJ, ITER) \
1104 for (ira_object_iter_init (&(ITER)); \
1105 ira_object_iter_cond (&(ITER), &(OBJ));)
1107 /* The iterator for objects associated with an allocno. */
1108 typedef struct {
1109 /* The number of the element in the allocno's object array. */
1110 int n;
1111 } ira_allocno_object_iterator;
1113 /* Initialize the iterator I. */
1114 static inline void
1115 ira_allocno_object_iter_init (ira_allocno_object_iterator *i)
1116 {
1117 i->n = 0;
1118 }
1120 /* Return TRUE if we have more objects to visit in allocno A, in which
1121 case *O is set to the object to be visited. Otherwise, return
1122 FALSE. */
1123 static inline bool
1124 ira_allocno_object_iter_cond (ira_allocno_object_iterator *i, ira_allocno_t a,
1125 ira_object_t *o)
1126 {
1127 *o = ALLOCNO_OBJECT (a, i->n);
1128 return i->n++ < ALLOCNO_NUM_OBJECTS (a);
1129 }
1131 /* Loop over all objects associated with allocno A. In each
1132 iteration, O is set to the next object. ITER is an instance of
1133 ira_allocno_object_iterator used to iterate the objects. */
1134 #define FOR_EACH_ALLOCNO_OBJECT(A, O, ITER) \
1135 for (ira_allocno_object_iter_init (&(ITER)); \
1136 ira_allocno_object_iter_cond (&(ITER), (A), &(O));)
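/* Usage sketch (illustrative only): recording a hard register HARD_REGNO
   (a hypothetical variable) as conflicting in every object of allocno A:

     ira_object_t obj;
     ira_allocno_object_iterator oi;
     FOR_EACH_ALLOCNO_OBJECT (A, obj, oi)
       SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                         hard_regno);  */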
1139 /* The iterator for copies. */
1140 typedef struct {
1141 /* The number of the current element in IRA_COPIES. */
1142 int n;
1143 } ira_copy_iterator;
1145 /* Initialize the iterator I. */
1146 static inline void
1147 ira_copy_iter_init (ira_copy_iterator *i)
1148 {
1149 i->n = 0;
1150 }
1152 /* Return TRUE if we have more copies to visit, in which case *CP is
1153 set to the copy to be visited. Otherwise, return FALSE. */
1154 static inline bool
1155 ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
1156 {
1157 int n;
1159 for (n = i->n; n < ira_copies_num; n++)
1160 if (ira_copies[n] != NULL)
1161 {
1162 *cp = ira_copies[n];
1163 i->n = n + 1;
1164 return true;
1165 }
1166 return false;
1167 }
1169 /* Loop over all copies. In each iteration, C is set to the next
1170 copy. ITER is an instance of ira_copy_iterator used to iterate
1171 the copies. */
1172 #define FOR_EACH_COPY(C, ITER) \
1173 for (ira_copy_iter_init (&(ITER)); \
1174 ira_copy_iter_cond (&(ITER), &(C));)
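/* Usage sketch (illustrative only): summing the frequency of the copies
   that come from real move insns:

     ira_copy_t cp;
     ira_copy_iterator ci;
     int move_freq = 0;
     FOR_EACH_COPY (cp, ci)
       if (cp->insn != NULL_RTX)
         move_freq += cp->freq;  */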
1176 /* The iterator for object conflicts. */
1177 typedef struct {
1179 /* TRUE if the conflicts are represented by a vector of objects. */
1180 bool conflict_vec_p;
1182 /* The conflict vector or conflict bit vector. */
1183 void *vec;
1185 /* The number of the current element in the vector (of type
1186 ira_object_t or IRA_INT_TYPE). */
1187 unsigned int word_num;
1189 /* The bit vector size. It is defined only if
1190 OBJECT_CONFLICT_VEC_P is FALSE. */
1191 unsigned int size;
1193 /* The current bit index of bit vector. It is defined only if
1194 OBJECT_CONFLICT_VEC_P is FALSE. */
1195 unsigned int bit_num;
1197 /* The object id corresponding to the 1st bit of the bit vector. It
1198 is defined only if OBJECT_CONFLICT_VEC_P is FALSE. */
1199 int base_conflict_id;
1201 /* The word of bit vector currently visited. It is defined only if
1202 OBJECT_CONFLICT_VEC_P is FALSE. */
1203 unsigned IRA_INT_TYPE word;
1204 } ira_object_conflict_iterator;
1206 /* Initialize the iterator I with the conflicts of object OBJ. */
1207 static inline void
1208 ira_object_conflict_iter_init (ira_object_conflict_iterator *i,
1209 ira_object_t obj)
1210 {
1211 i->conflict_vec_p = OBJECT_CONFLICT_VEC_P (obj);
1212 i->vec = OBJECT_CONFLICT_ARRAY (obj);
1213 i->word_num = 0;
1214 if (i->conflict_vec_p)
1215 i->size = i->bit_num = i->base_conflict_id = i->word = 0;
1216 else
1217 {
1218 if (OBJECT_MIN (obj) > OBJECT_MAX (obj))
1219 i->size = 0;
1220 else
1221 i->size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj)
1222 + IRA_INT_BITS)
1223 / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
1224 i->bit_num = 0;
1225 i->base_conflict_id = OBJECT_MIN (obj);
1226 i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
1227 }
1228 }
1230 /* Return TRUE if we have more conflicting objects to visit, in which
1231 case *POBJ is set to the object to be visited.  Otherwise, return
1232 FALSE. */
1233 static inline bool
1234 ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
1235 ira_object_t *pobj)
1236 {
1237 ira_object_t obj;
1239 if (i->conflict_vec_p)
1240 {
1241 obj = ((ira_object_t *) i->vec)[i->word_num];
1242 if (obj == NULL)
1243 return false;
1244 }
1245 else
1246 {
1247 /* Skip words that are zeros. */
1248 for (; i->word == 0; i->word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
1249 {
1250 i->word_num++;
1252 /* If we have reached the end, break. */
1253 if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
1254 return false;
1256 i->bit_num = i->word_num * IRA_INT_BITS;
1257 }
1259 /* Skip bits that are zero. */
1260 for (; (i->word & 1) == 0; i->word >>= 1)
1261 i->bit_num++;
1263 obj = ira_object_id_map[i->bit_num + i->base_conflict_id];
1264 }
1266 *pobj = obj;
1267 return true;
1268 }
1270 /* Advance to the next conflicting object. */
1271 static inline void
1272 ira_object_conflict_iter_next (ira_object_conflict_iterator *i)
1273 {
1274 if (i->conflict_vec_p)
1275 i->word_num++;
1276 else
1277 {
1278 i->word >>= 1;
1279 i->bit_num++;
1280 }
1281 }
1283 /* Loop over all objects conflicting with OBJ. In each iteration,
1284 CONF is set to the next conflicting object. ITER is an instance
1285 of ira_object_conflict_iterator used to iterate the conflicts. */
1286 #define FOR_EACH_OBJECT_CONFLICT(OBJ, CONF, ITER) \
1287 for (ira_object_conflict_iter_init (&(ITER), (OBJ)); \
1288 ira_object_conflict_iter_cond (&(ITER), &(CONF)); \
1289 ira_object_conflict_iter_next (&(ITER)))
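/* Usage sketch (illustrative only): visiting every object conflicting with
   OBJ and the allocno each one belongs to:

     ira_object_t conf_obj;
     ira_object_conflict_iterator oci;
     FOR_EACH_OBJECT_CONFLICT (OBJ, conf_obj, oci)
       fprintf (stderr, " conflicts with a%dr%d\n",
                ALLOCNO_NUM (OBJECT_ALLOCNO (conf_obj)),
                ALLOCNO_REGNO (OBJECT_ALLOCNO (conf_obj)));  */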
1293 /* The function returns TRUE if hard registers starting with
1294 HARD_REGNO and containing value of MODE are not in set
1295 HARD_REGSET. */
1296 static inline bool
1297 ira_hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
1298 HARD_REG_SET hard_regset)
1299 {
1300 int i;
1302 ira_assert (hard_regno >= 0);
1303 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1304 if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1305 return false;
1306 return true;
1307 }
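/* Usage sketch (illustrative only): checking whether hard register 0 could
   hold a DImode value for object OBJ without clashing with its accumulated
   conflicts:

     bool ok = ira_hard_reg_not_in_set_p
                 (0, DImode, OBJECT_TOTAL_CONFLICT_HARD_REGS (OBJ));  */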
1311 /* To save memory we use a lazy approach for allocation and
1312 initialization of the cost vectors. We do this only when it is
1313 really necessary. */
1315 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1316 initialize the elements by VAL if it is necessary */
1317 static inline void
1318 ira_allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
1319 {
1320 int i, *reg_costs;
1321 int len;
1323 if (*vec != NULL)
1324 return;
1325 *vec = reg_costs = ira_allocate_cost_vector (cover_class);
1326 len = ira_class_hard_regs_num[cover_class];
1327 for (i = 0; i < len; i++)
1328 reg_costs[i] = val;
1329 }
1331 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1332 copy values of vector SRC into the vector if it is necessary */
1333 static inline void
1334 ira_allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
1335 {
1336 int len;
1338 if (*vec != NULL || src == NULL)
1339 return;
1340 *vec = ira_allocate_cost_vector (cover_class);
1341 len = ira_class_hard_regs_num[cover_class];
1342 memcpy (*vec, src, sizeof (int) * len);
1343 }
1345 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1346 add values of vector SRC into the vector if it is necessary */
1347 static inline void
1348 ira_allocate_and_accumulate_costs (int **vec, enum reg_class cover_class,
1349 int *src)
1350 {
1351 int i, len;
1353 if (src == NULL)
1354 return;
1355 len = ira_class_hard_regs_num[cover_class];
1356 if (*vec == NULL)
1357 {
1358 *vec = ira_allocate_cost_vector (cover_class);
1359 memset (*vec, 0, sizeof (int) * len);
1360 }
1361 for (i = 0; i < len; i++)
1362 (*vec)[i] += src[i];
1363 }
1365 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1366 copy values of vector SRC into the vector or initialize it by VAL
1367 (if SRC is null). */
1368 static inline void
1369 ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
1370 int val, int *src)
1371 {
1372 int i, *reg_costs;
1373 int len;
1375 if (*vec != NULL)
1376 return;
1377 *vec = reg_costs = ira_allocate_cost_vector (cover_class);
1378 len = ira_class_hard_regs_num[cover_class];
1379 if (src != NULL)
1380 memcpy (reg_costs, src, sizeof (int) * len);
1381 else
1382 {
1383 for (i = 0; i < len; i++)
1384 reg_costs[i] = val;
1385 }
1386 }
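/* Usage sketch (illustrative only): the lazy helpers above are normally
   combined with the cost macros, e.g. making sure the updated hard
   register costs of allocno A exist before adjusting entry I by DELTA
   (I and DELTA are hypothetical):

     ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (A),
                                  ALLOCNO_COVER_CLASS (A),
                                  ALLOCNO_HARD_REG_COSTS (A));
     if (ALLOCNO_UPDATED_HARD_REG_COSTS (A) != NULL)
       ALLOCNO_UPDATED_HARD_REG_COSTS (A)[i] += delta;  */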