/* Source snapshot: commit "Move all varpool routines out of cgraph/cgraphunit
   to varpool.c" -- [official-gcc.git] / gcc / cgraph.c
   blob: 7706098ab3c96ab091a280abc31e88d88668140f  */
/* Callgraph handling code.
   Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This file contains basic routines manipulating call graph

The callgraph:

    The call-graph is data structure designed for intra-procedural optimization
    but it is also used in non-unit-at-a-time compilation to allow easier code
    sharing.

    The call-graph consist of nodes and edges represented via linked lists.
    Each function (external or not) corresponds to the unique node (in
    contrast to tree DECL nodes where we can have multiple nodes for each
    function).

    The mapping from declarations to call-graph nodes is done using hash table
    based on DECL_ASSEMBLER_NAME, so it is essential for assembler name to
    not change once the declaration is inserted into the call-graph.
    The call-graph nodes are created lazily using cgraph_node function when
    called for unknown declaration.

    When built, there is one edge for each direct call.  It is possible that
    the reference will be later optimized out.  The call-graph is built
    conservatively in order to make conservative data flow analysis possible.

    The callgraph at the moment does not represent indirect calls or calls
    from other compilation unit.  Flag NEEDED is set for each node that may
    be accessed in such an invisible way and it shall be considered an
    entry point to the callgraph.

Interprocedural information:

    Callgraph is place to store data needed for interprocedural optimization.
    All data structures are divided into three components: local_info that
    is produced while analyzing the function, global_info that is result
    of global walking of the callgraph on the end of compilation and
    rtl_info used by RTL backend to propagate data from already compiled
    functions to their callers.

Inlining plans:

    The function inlining information is decided in advance and maintained
    in the callgraph as so called inline plan.
    For each inlined call, the callee's node is cloned to represent the
    new function copy produced by inliner.
    Each inlined call gets a unique corresponding clone node of the callee
    and the data structure is updated while inlining is performed, so
    the clones are eliminated and their callee edges redirected to the
    caller.

    Each edge has "inline_failed" field.  When the field is set to NULL,
    the call will be inlined.  When it is non-NULL it contains a reason
    why inlining wasn't performed.  */
74 #include "config.h"
75 #include "system.h"
76 #include "coretypes.h"
77 #include "tm.h"
78 #include "tree.h"
79 #include "tree-inline.h"
80 #include "langhooks.h"
81 #include "hashtab.h"
82 #include "toplev.h"
83 #include "flags.h"
84 #include "ggc.h"
85 #include "debug.h"
86 #include "target.h"
87 #include "basic-block.h"
88 #include "cgraph.h"
89 #include "varray.h"
90 #include "output.h"
91 #include "intl.h"
92 #include "tree-gimple.h"
93 #include "tree-dump.h"
95 static void cgraph_node_remove_callers (struct cgraph_node *node);
96 static inline void cgraph_edge_remove_caller (struct cgraph_edge *e);
97 static inline void cgraph_edge_remove_callee (struct cgraph_edge *e);
99 /* Hash table used to convert declarations into nodes. */
100 static GTY((param_is (struct cgraph_node))) htab_t cgraph_hash;
102 /* The linked list of cgraph nodes. */
103 struct cgraph_node *cgraph_nodes;
105 /* Queue of cgraph nodes scheduled to be lowered. */
106 struct cgraph_node *cgraph_nodes_queue;
108 /* Queue of cgraph nodes scheduled to be expanded. This is a
109 secondary queue used during optimization to accommodate passes that
110 may generate new functions that need to be optimized and expanded. */
111 struct cgraph_node *cgraph_expand_queue;
113 /* Number of nodes in existence. */
114 int cgraph_n_nodes;
116 /* Maximal uid used in cgraph nodes. */
117 int cgraph_max_uid;
119 /* Set when whole unit has been analyzed so we can access global info. */
120 bool cgraph_global_info_ready = false;
122 /* Set when the cgraph is fully build and the basic flags are computed. */
123 bool cgraph_function_flags_ready = false;
125 /* Linked list of cgraph asm nodes. */
126 struct cgraph_asm_node *cgraph_asm_nodes;
128 /* Last node in cgraph_asm_nodes. */
129 static GTY(()) struct cgraph_asm_node *cgraph_asm_last_node;
131 /* The order index of the next cgraph node to be created. This is
132 used so that we can sort the cgraph nodes in order by when we saw
133 them, to support -fno-toplevel-reorder. */
134 int cgraph_order;
136 static hashval_t hash_node (const void *);
137 static int eq_node (const void *, const void *);
139 /* Returns a hash code for P. */
141 static hashval_t
142 hash_node (const void *p)
144 const struct cgraph_node *n = (const struct cgraph_node *) p;
145 return (hashval_t) DECL_UID (n->decl);
148 /* Returns nonzero if P1 and P2 are equal. */
150 static int
151 eq_node (const void *p1, const void *p2)
153 const struct cgraph_node *n1 = (const struct cgraph_node *) p1;
154 const struct cgraph_node *n2 = (const struct cgraph_node *) p2;
155 return DECL_UID (n1->decl) == DECL_UID (n2->decl);
158 /* Allocate new callgraph node and insert it into basic data structures. */
159 static struct cgraph_node *
160 cgraph_create_node (void)
162 struct cgraph_node *node;
164 node = GGC_CNEW (struct cgraph_node);
165 node->next = cgraph_nodes;
166 node->uid = cgraph_max_uid++;
167 node->order = cgraph_order++;
168 if (cgraph_nodes)
169 cgraph_nodes->previous = node;
170 node->previous = NULL;
171 node->global.estimated_growth = INT_MIN;
172 cgraph_nodes = node;
173 cgraph_n_nodes++;
174 return node;
177 /* Return cgraph node assigned to DECL. Create new one when needed. */
178 struct cgraph_node *
179 cgraph_node (tree decl)
181 struct cgraph_node key, *node, **slot;
183 gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
185 if (!cgraph_hash)
186 cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL);
188 key.decl = decl;
190 slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, &key, INSERT);
192 if (*slot)
194 node = *slot;
195 if (!node->master_clone)
196 node->master_clone = node;
197 return node;
200 node = cgraph_create_node ();
201 node->decl = decl;
202 *slot = node;
203 if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
205 node->origin = cgraph_node (DECL_CONTEXT (decl));
206 node->next_nested = node->origin->nested;
207 node->origin->nested = node;
208 node->master_clone = node;
210 return node;
213 /* Insert already constructed node into hashtable. */
215 void
216 cgraph_insert_node_to_hashtable (struct cgraph_node *node)
218 struct cgraph_node **slot;
220 slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, node, INSERT);
222 gcc_assert (!*slot);
223 *slot = node;
227 /* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME.
228 Return NULL if there's no such node. */
230 struct cgraph_node *
231 cgraph_node_for_asm (tree asmname)
233 struct cgraph_node *node;
235 for (node = cgraph_nodes; node ; node = node->next)
236 if (decl_assembler_name_equal (node->decl, asmname))
237 return node;
239 return NULL;
242 /* Returns a hash value for X (which really is a die_struct). */
244 static hashval_t
245 edge_hash (const void *x)
247 return htab_hash_pointer (((struct cgraph_edge *) x)->call_stmt);
250 /* Return nonzero if decl_id of die_struct X is the same as UID of decl *Y. */
252 static int
253 edge_eq (const void *x, const void *y)
255 return ((struct cgraph_edge *) x)->call_stmt == y;
258 /* Return callgraph edge representing CALL_EXPR statement. */
259 struct cgraph_edge *
260 cgraph_edge (struct cgraph_node *node, tree call_stmt)
262 struct cgraph_edge *e, *e2;
263 int n = 0;
265 if (node->call_site_hash)
266 return htab_find_with_hash (node->call_site_hash, call_stmt,
267 htab_hash_pointer (call_stmt));
269 /* This loop may turn out to be performance problem. In such case adding
270 hashtables into call nodes with very many edges is probably best
271 solution. It is not good idea to add pointer into CALL_EXPR itself
272 because we want to make possible having multiple cgraph nodes representing
273 different clones of the same body before the body is actually cloned. */
274 for (e = node->callees; e; e= e->next_callee)
276 if (e->call_stmt == call_stmt)
277 break;
278 n++;
280 if (n > 100)
282 node->call_site_hash = htab_create_ggc (120, edge_hash, edge_eq, NULL);
283 for (e2 = node->callees; e2; e2 = e2->next_callee)
285 void **slot;
286 slot = htab_find_slot_with_hash (node->call_site_hash,
287 e2->call_stmt,
288 htab_hash_pointer (e2->call_stmt),
289 INSERT);
290 gcc_assert (!*slot);
291 *slot = e2;
294 return e;
297 /* Change call_smtt of edge E to NEW_STMT. */
298 void
299 cgraph_set_call_stmt (struct cgraph_edge *e, tree new_stmt)
301 if (e->caller->call_site_hash)
303 htab_remove_elt_with_hash (e->caller->call_site_hash,
304 e->call_stmt,
305 htab_hash_pointer (e->call_stmt));
307 e->call_stmt = new_stmt;
308 if (e->caller->call_site_hash)
310 void **slot;
311 slot = htab_find_slot_with_hash (e->caller->call_site_hash,
312 e->call_stmt,
313 htab_hash_pointer
314 (e->call_stmt), INSERT);
315 gcc_assert (!*slot);
316 *slot = e;
320 /* Create edge from CALLER to CALLEE in the cgraph. */
322 struct cgraph_edge *
323 cgraph_create_edge (struct cgraph_node *caller, struct cgraph_node *callee,
324 tree call_stmt, gcov_type count, int nest)
326 struct cgraph_edge *edge = GGC_NEW (struct cgraph_edge);
327 #ifdef ENABLE_CHECKING
328 struct cgraph_edge *e;
330 for (e = caller->callees; e; e = e->next_callee)
331 gcc_assert (e->call_stmt != call_stmt);
332 #endif
334 gcc_assert (get_call_expr_in (call_stmt));
336 if (!DECL_SAVED_TREE (callee->decl))
337 edge->inline_failed = N_("function body not available");
338 else if (callee->local.redefined_extern_inline)
339 edge->inline_failed = N_("redefined extern inline functions are not "
340 "considered for inlining");
341 else if (callee->local.inlinable)
342 edge->inline_failed = N_("function not considered for inlining");
343 else
344 edge->inline_failed = N_("function not inlinable");
346 edge->aux = NULL;
348 edge->caller = caller;
349 edge->callee = callee;
350 edge->call_stmt = call_stmt;
351 edge->prev_caller = NULL;
352 edge->next_caller = callee->callers;
353 if (callee->callers)
354 callee->callers->prev_caller = edge;
355 edge->prev_callee = NULL;
356 edge->next_callee = caller->callees;
357 if (caller->callees)
358 caller->callees->prev_callee = edge;
359 caller->callees = edge;
360 callee->callers = edge;
361 edge->count = count;
362 edge->loop_nest = nest;
363 if (caller->call_site_hash)
365 void **slot;
366 slot = htab_find_slot_with_hash (caller->call_site_hash,
367 edge->call_stmt,
368 htab_hash_pointer
369 (edge->call_stmt),
370 INSERT);
371 gcc_assert (!*slot);
372 *slot = edge;
374 return edge;
377 /* Remove the edge E from the list of the callers of the callee. */
379 static inline void
380 cgraph_edge_remove_callee (struct cgraph_edge *e)
382 if (e->prev_caller)
383 e->prev_caller->next_caller = e->next_caller;
384 if (e->next_caller)
385 e->next_caller->prev_caller = e->prev_caller;
386 if (!e->prev_caller)
387 e->callee->callers = e->next_caller;
390 /* Remove the edge E from the list of the callees of the caller. */
392 static inline void
393 cgraph_edge_remove_caller (struct cgraph_edge *e)
395 if (e->prev_callee)
396 e->prev_callee->next_callee = e->next_callee;
397 if (e->next_callee)
398 e->next_callee->prev_callee = e->prev_callee;
399 if (!e->prev_callee)
400 e->caller->callees = e->next_callee;
401 if (e->caller->call_site_hash)
402 htab_remove_elt_with_hash (e->caller->call_site_hash,
403 e->call_stmt,
404 htab_hash_pointer (e->call_stmt));
/* Remove the edge E in the cgraph (from both caller and callee lists).
   The edge structure itself is GC-allocated and not freed here.  */

void
cgraph_remove_edge (struct cgraph_edge *e)
{
  /* Remove from callers list of the callee.  */
  cgraph_edge_remove_callee (e);

  /* Remove from callees list of the callers.  */
  cgraph_edge_remove_caller (e);
}
419 /* Redirect callee of E to N. The function does not update underlying
420 call expression. */
422 void
423 cgraph_redirect_edge_callee (struct cgraph_edge *e, struct cgraph_node *n)
425 /* Remove from callers list of the current callee. */
426 cgraph_edge_remove_callee (e);
428 /* Insert to callers list of the new callee. */
429 e->prev_caller = NULL;
430 if (n->callers)
431 n->callers->prev_caller = e;
432 e->next_caller = n->callers;
433 n->callers = e;
434 e->callee = n;
437 /* Remove all callees from the node. */
439 void
440 cgraph_node_remove_callees (struct cgraph_node *node)
442 struct cgraph_edge *e;
444 /* It is sufficient to remove the edges from the lists of callers of
445 the callees. The callee list of the node can be zapped with one
446 assignment. */
447 for (e = node->callees; e; e = e->next_callee)
448 cgraph_edge_remove_callee (e);
449 node->callees = NULL;
450 if (node->call_site_hash)
452 htab_delete (node->call_site_hash);
453 node->call_site_hash = NULL;
457 /* Remove all callers from the node. */
459 static void
460 cgraph_node_remove_callers (struct cgraph_node *node)
462 struct cgraph_edge *e;
464 /* It is sufficient to remove the edges from the lists of callees of
465 the callers. The caller list of the node can be zapped with one
466 assignment. */
467 for (e = node->callers; e; e = e->next_caller)
468 cgraph_edge_remove_caller (e);
469 node->callers = NULL;
472 /* Remove the node from cgraph. */
474 void
475 cgraph_remove_node (struct cgraph_node *node)
477 void **slot;
478 bool kill_body = false;
480 cgraph_node_remove_callers (node);
481 cgraph_node_remove_callees (node);
482 /* Incremental inlining access removed nodes stored in the postorder list.
484 node->needed = node->reachable = false;
485 while (node->nested)
486 cgraph_remove_node (node->nested);
487 if (node->origin)
489 struct cgraph_node **node2 = &node->origin->nested;
491 while (*node2 != node)
492 node2 = &(*node2)->next_nested;
493 *node2 = node->next_nested;
495 if (node->previous)
496 node->previous->next = node->next;
497 else
498 cgraph_nodes = node->next;
499 if (node->next)
500 node->next->previous = node->previous;
501 node->next = NULL;
502 node->previous = NULL;
503 slot = htab_find_slot (cgraph_hash, node, NO_INSERT);
504 if (*slot == node)
506 if (node->next_clone)
508 struct cgraph_node *new_node = node->next_clone;
509 struct cgraph_node *n;
511 /* Make the next clone be the master clone */
512 for (n = new_node; n; n = n->next_clone)
513 n->master_clone = new_node;
515 *slot = new_node;
516 node->next_clone->prev_clone = NULL;
518 else
520 htab_clear_slot (cgraph_hash, slot);
521 kill_body = true;
524 else
526 node->prev_clone->next_clone = node->next_clone;
527 if (node->next_clone)
528 node->next_clone->prev_clone = node->prev_clone;
531 /* While all the clones are removed after being proceeded, the function
532 itself is kept in the cgraph even after it is compiled. Check whether
533 we are done with this body and reclaim it proactively if this is the case.
535 if (!kill_body && *slot)
537 struct cgraph_node *n = (struct cgraph_node *) *slot;
538 if (!n->next_clone && !n->global.inlined_to
539 && (cgraph_global_info_ready
540 && (TREE_ASM_WRITTEN (n->decl) || DECL_EXTERNAL (n->decl))))
541 kill_body = true;
544 if (kill_body && flag_unit_at_a_time)
546 DECL_SAVED_TREE (node->decl) = NULL;
547 DECL_STRUCT_FUNCTION (node->decl) = NULL;
548 DECL_INITIAL (node->decl) = error_mark_node;
550 node->decl = NULL;
551 if (node->call_site_hash)
553 htab_delete (node->call_site_hash);
554 node->call_site_hash = NULL;
556 cgraph_n_nodes--;
557 /* Do not free the structure itself so the walk over chain can continue. */
560 /* Notify finalize_compilation_unit that given node is reachable. */
562 void
563 cgraph_mark_reachable_node (struct cgraph_node *node)
565 if (!node->reachable && node->local.finalized)
567 notice_global_symbol (node->decl);
568 node->reachable = 1;
569 gcc_assert (!cgraph_global_info_ready);
571 node->next_needed = cgraph_nodes_queue;
572 cgraph_nodes_queue = node;
576 /* Likewise indicate that a node is needed, i.e. reachable via some
577 external means. */
579 void
580 cgraph_mark_needed_node (struct cgraph_node *node)
582 node->needed = 1;
583 cgraph_mark_reachable_node (node);
586 /* Return local info for the compiled function. */
588 struct cgraph_local_info *
589 cgraph_local_info (tree decl)
591 struct cgraph_node *node;
593 gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
594 node = cgraph_node (decl);
595 return &node->local;
598 /* Return local info for the compiled function. */
600 struct cgraph_global_info *
601 cgraph_global_info (tree decl)
603 struct cgraph_node *node;
605 gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
606 node = cgraph_node (decl);
607 return &node->global;
610 /* Return local info for the compiled function. */
612 struct cgraph_rtl_info *
613 cgraph_rtl_info (tree decl)
615 struct cgraph_node *node;
617 gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
618 node = cgraph_node (decl);
619 if (decl != current_function_decl
620 && !TREE_ASM_WRITTEN (node->decl))
621 return NULL;
622 return &node->rtl;
625 /* Return name of the node used in debug output. */
626 const char *
627 cgraph_node_name (struct cgraph_node *node)
629 return lang_hooks.decl_printable_name (node->decl, 2);
/* Names used to print out the availability enum (indexed by
   enum availability; see cgraph.h).  */
const char * const cgraph_availability_names[] =
  {"unset", "not_available", "overwrittable", "available", "local"};
636 /* Dump given cgraph node. */
637 void
638 dump_cgraph_node (FILE *f, struct cgraph_node *node)
640 struct cgraph_edge *edge;
641 fprintf (f, "%s/%i:", cgraph_node_name (node), node->uid);
642 if (node->global.inlined_to)
643 fprintf (f, " (inline copy in %s/%i)",
644 cgraph_node_name (node->global.inlined_to),
645 node->global.inlined_to->uid);
646 if (cgraph_function_flags_ready)
647 fprintf (f, " availability:%s",
648 cgraph_availability_names [cgraph_function_body_availability (node)]);
649 if (node->master_clone && node->master_clone->uid != node->uid)
650 fprintf (f, "(%i)", node->master_clone->uid);
651 if (node->count)
652 fprintf (f, " executed "HOST_WIDEST_INT_PRINT_DEC"x",
653 (HOST_WIDEST_INT)node->count);
654 if (node->local.self_insns)
655 fprintf (f, " %i insns", node->local.self_insns);
656 if (node->global.insns && node->global.insns != node->local.self_insns)
657 fprintf (f, " (%i after inlining)", node->global.insns);
658 if (node->local.estimated_self_stack_size)
659 fprintf (f, " %i bytes stack usage", (int)node->local.estimated_self_stack_size);
660 if (node->global.estimated_stack_size != node->local.estimated_self_stack_size)
661 fprintf (f, " %i bytes after inlining", (int)node->global.estimated_stack_size);
662 if (node->origin)
663 fprintf (f, " nested in: %s", cgraph_node_name (node->origin));
664 if (node->needed)
665 fprintf (f, " needed");
666 else if (node->reachable)
667 fprintf (f, " reachable");
668 if (DECL_SAVED_TREE (node->decl))
669 fprintf (f, " tree");
670 if (node->output)
671 fprintf (f, " output");
672 if (node->local.local)
673 fprintf (f, " local");
674 if (node->local.externally_visible)
675 fprintf (f, " externally_visible");
676 if (node->local.finalized)
677 fprintf (f, " finalized");
678 if (node->local.disregard_inline_limits)
679 fprintf (f, " always_inline");
680 else if (node->local.inlinable)
681 fprintf (f, " inlinable");
682 if (node->local.redefined_extern_inline)
683 fprintf (f, " redefined_extern_inline");
684 if (TREE_ASM_WRITTEN (node->decl))
685 fprintf (f, " asm_written");
687 fprintf (f, "\n called by: ");
688 for (edge = node->callers; edge; edge = edge->next_caller)
690 fprintf (f, "%s/%i ", cgraph_node_name (edge->caller),
691 edge->caller->uid);
692 if (edge->count)
693 fprintf (f, "("HOST_WIDEST_INT_PRINT_DEC"x) ",
694 (HOST_WIDEST_INT)edge->count);
695 if (!edge->inline_failed)
696 fprintf(f, "(inlined) ");
699 fprintf (f, "\n calls: ");
700 for (edge = node->callees; edge; edge = edge->next_callee)
702 fprintf (f, "%s/%i ", cgraph_node_name (edge->callee),
703 edge->callee->uid);
704 if (!edge->inline_failed)
705 fprintf(f, "(inlined) ");
706 if (edge->count)
707 fprintf (f, "("HOST_WIDEST_INT_PRINT_DEC"x) ",
708 (HOST_WIDEST_INT)edge->count);
709 if (edge->loop_nest)
710 fprintf (f, "(nested in %i loops) ", edge->loop_nest);
712 fprintf (f, "\n");
715 /* Dump the callgraph. */
717 void
718 dump_cgraph (FILE *f)
720 struct cgraph_node *node;
722 fprintf (f, "callgraph:\n\n");
723 for (node = cgraph_nodes; node; node = node->next)
724 dump_cgraph_node (f, node);
727 /* Set the DECL_ASSEMBLER_NAME and update cgraph hashtables. */
728 void
729 change_decl_assembler_name (tree decl, tree name)
731 if (!DECL_ASSEMBLER_NAME_SET_P (decl))
733 SET_DECL_ASSEMBLER_NAME (decl, name);
734 return;
736 if (name == DECL_ASSEMBLER_NAME (decl))
737 return;
739 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))
740 && DECL_RTL_SET_P (decl))
741 warning (0, "%D renamed after being referenced in assembly", decl);
743 SET_DECL_ASSEMBLER_NAME (decl, name);
746 /* Add a top-level asm statement to the list. */
748 struct cgraph_asm_node *
749 cgraph_add_asm_node (tree asm_str)
751 struct cgraph_asm_node *node;
753 node = GGC_CNEW (struct cgraph_asm_node);
754 node->asm_str = asm_str;
755 node->order = cgraph_order++;
756 node->next = NULL;
757 if (cgraph_asm_nodes == NULL)
758 cgraph_asm_nodes = node;
759 else
760 cgraph_asm_last_node->next = node;
761 cgraph_asm_last_node = node;
762 return node;
765 /* Return true when the DECL can possibly be inlined. */
766 bool
767 cgraph_function_possibly_inlined_p (tree decl)
769 if (!cgraph_global_info_ready)
770 return (DECL_INLINE (decl) && !flag_really_no_inline);
771 return DECL_POSSIBLY_INLINED (decl);
774 /* Create clone of E in the node N represented by CALL_EXPR the callgraph. */
775 struct cgraph_edge *
776 cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
777 tree call_stmt, gcov_type count_scale, int loop_nest,
778 bool update_original)
780 struct cgraph_edge *new;
782 new = cgraph_create_edge (n, e->callee, call_stmt,
783 e->count * count_scale / REG_BR_PROB_BASE,
784 e->loop_nest + loop_nest);
786 new->inline_failed = e->inline_failed;
787 if (update_original)
789 e->count -= new->count;
790 if (e->count < 0)
791 e->count = 0;
793 return new;
796 /* Create node representing clone of N executed COUNT times. Decrease
797 the execution counts from original node too.
799 When UPDATE_ORIGINAL is true, the counts are subtracted from the original
800 function's profile to reflect the fact that part of execution is handled
801 by node. */
802 struct cgraph_node *
803 cgraph_clone_node (struct cgraph_node *n, gcov_type count, int loop_nest,
804 bool update_original)
806 struct cgraph_node *new = cgraph_create_node ();
807 struct cgraph_edge *e;
808 gcov_type count_scale;
810 new->decl = n->decl;
811 new->origin = n->origin;
812 if (new->origin)
814 new->next_nested = new->origin->nested;
815 new->origin->nested = new;
817 new->analyzed = n->analyzed;
818 new->local = n->local;
819 new->global = n->global;
820 new->rtl = n->rtl;
821 new->master_clone = n->master_clone;
822 new->count = count;
823 if (n->count)
824 count_scale = new->count * REG_BR_PROB_BASE / n->count;
825 else
826 count_scale = 0;
827 if (update_original)
829 n->count -= count;
830 if (n->count < 0)
831 n->count = 0;
834 for (e = n->callees;e; e=e->next_callee)
835 cgraph_clone_edge (e, new, e->call_stmt, count_scale, loop_nest,
836 update_original);
838 new->next_clone = n->next_clone;
839 new->prev_clone = n;
840 n->next_clone = new;
841 if (new->next_clone)
842 new->next_clone->prev_clone = new;
844 return new;
847 /* Return true if N is an master_clone, (see cgraph_master_clone). */
849 bool
850 cgraph_is_master_clone (struct cgraph_node *n)
852 return (n == cgraph_master_clone (n));
855 struct cgraph_node *
856 cgraph_master_clone (struct cgraph_node *n)
858 enum availability avail = cgraph_function_body_availability (n);
860 if (avail == AVAIL_NOT_AVAILABLE || avail == AVAIL_OVERWRITABLE)
861 return NULL;
863 if (!n->master_clone)
864 n->master_clone = cgraph_node (n->decl);
866 return n->master_clone;
869 /* NODE is no longer nested function; update cgraph accordingly. */
870 void
871 cgraph_unnest_node (struct cgraph_node *node)
873 struct cgraph_node **node2 = &node->origin->nested;
874 gcc_assert (node->origin);
876 while (*node2 != node)
877 node2 = &(*node2)->next_nested;
878 *node2 = node->next_nested;
879 node->origin = NULL;
882 /* Return function availability. See cgraph.h for description of individual
883 return values. */
884 enum availability
885 cgraph_function_body_availability (struct cgraph_node *node)
887 enum availability avail;
888 gcc_assert (cgraph_function_flags_ready);
889 if (!node->analyzed)
890 avail = AVAIL_NOT_AVAILABLE;
891 else if (node->local.local)
892 avail = AVAIL_LOCAL;
893 else if (node->local.externally_visible)
894 avail = AVAIL_AVAILABLE;
896 /* If the function can be overwritten, return OVERWRITABLE. Take
897 care at least of two notable extensions - the COMDAT functions
898 used to share template instantiations in C++ (this is symmetric
899 to code cp_cannot_inline_tree_fn and probably shall be shared and
900 the inlinability hooks completely eliminated).
902 ??? Does the C++ one definition rule allow us to always return
903 AVAIL_AVAILABLE here? That would be good reason to preserve this
904 hook Similarly deal with extern inline functions - this is again
905 necessary to get C++ shared functions having keyed templates
906 right and in the C extension documentation we probably should
907 document the requirement of both versions of function (extern
908 inline and offline) having same side effect characteristics as
909 good optimization is what this optimization is about. */
911 else if (!(*targetm.binds_local_p) (node->decl)
912 && !DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl))
913 avail = AVAIL_OVERWRITABLE;
914 else avail = AVAIL_AVAILABLE;
916 return avail;
919 /* Add the function FNDECL to the call graph. FNDECL is assumed to be
920 in low GIMPLE form and ready to be processed by cgraph_finalize_function.
922 When operating in unit-at-a-time, a new callgraph node is added to
923 CGRAPH_EXPAND_QUEUE, which is processed after all the original
924 functions in the call graph .
926 When not in unit-at-a-time, the new callgraph node is added to
927 CGRAPH_NODES_QUEUE for cgraph_assemble_pending_functions to
928 process. */
930 void
931 cgraph_add_new_function (tree fndecl)
933 struct cgraph_node *n = cgraph_node (fndecl);
934 n->next_needed = cgraph_expand_queue;
935 cgraph_expand_queue = n;
938 #include "gt-cgraph.h"