bringing the front-end together
[official-gcc.git] / gcc / lto-streamer.c
blob561ecbc1a7edf7d085cb42912f68f9edf75fc229
1 /* Miscellaneous utilities for GIMPLE streaming. Things that are used
2 in both input and output are here.
4 Copyright 2009, 2010 Free Software Foundation, Inc.
5 Contributed by Doug Kwan <dougkwan@google.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28 #include "flags.h"
29 #include "tree.h"
30 #include "gimple.h"
31 #include "tree-flow.h"
32 #include "diagnostic-core.h"
33 #include "bitmap.h"
34 #include "vec.h"
35 #include "lto-streamer.h"
/* Statistics gathered during LTO, WPA and LTRANS.  */
struct lto_stats_d lto_stats;

/* LTO uses bitmaps with different life-times.  So use a separate
   obstack for all LTO bitmaps.  */
static bitmap_obstack lto_obstack;

/* True once LTO_OBSTACK has been initialized (done lazily by
   lto_bitmap_alloc on the first allocation).  */
static bool lto_obstack_initialized;
46 /* Return a string representing LTO tag TAG. */
48 const char *
49 lto_tag_name (enum LTO_tags tag)
51 if (lto_tag_is_tree_code_p (tag))
53 /* For tags representing tree nodes, return the name of the
54 associated tree code. */
55 return tree_code_name[lto_tag_to_tree_code (tag)];
58 if (lto_tag_is_gimple_code_p (tag))
60 /* For tags representing gimple statements, return the name of
61 the associated gimple code. */
62 return gimple_code_name[lto_tag_to_gimple_code (tag)];
65 switch (tag)
67 case LTO_null:
68 return "LTO_null";
69 case LTO_bb0:
70 return "LTO_bb0";
71 case LTO_bb1:
72 return "LTO_bb1";
73 case LTO_eh_region:
74 return "LTO_eh_region";
75 case LTO_function:
76 return "LTO_function";
77 case LTO_eh_table:
78 return "LTO_eh_table";
79 case LTO_ert_cleanup:
80 return "LTO_ert_cleanup";
81 case LTO_ert_try:
82 return "LTO_ert_try";
83 case LTO_ert_allowed_exceptions:
84 return "LTO_ert_allowed_exceptions";
85 case LTO_ert_must_not_throw:
86 return "LTO_ert_must_not_throw";
87 case LTO_tree_pickle_reference:
88 return "LTO_tree_pickle_reference";
89 case LTO_field_decl_ref:
90 return "LTO_field_decl_ref";
91 case LTO_function_decl_ref:
92 return "LTO_function_decl_ref";
93 case LTO_label_decl_ref:
94 return "LTO_label_decl_ref";
95 case LTO_namespace_decl_ref:
96 return "LTO_namespace_decl_ref";
97 case LTO_result_decl_ref:
98 return "LTO_result_decl_ref";
99 case LTO_ssa_name_ref:
100 return "LTO_ssa_name_ref";
101 case LTO_type_decl_ref:
102 return "LTO_type_decl_ref";
103 case LTO_type_ref:
104 return "LTO_type_ref";
105 case LTO_global_decl_ref:
106 return "LTO_global_decl_ref";
107 default:
108 return "LTO_UNKNOWN";
113 /* Allocate a bitmap from heap. Initializes the LTO obstack if necessary. */
115 bitmap
116 lto_bitmap_alloc (void)
118 if (!lto_obstack_initialized)
120 bitmap_obstack_initialize (&lto_obstack);
121 lto_obstack_initialized = true;
123 return BITMAP_ALLOC (&lto_obstack);
126 /* Free bitmap B. */
128 void
129 lto_bitmap_free (bitmap b)
131 BITMAP_FREE (b);
135 /* Get a section name for a particular type or name. The NAME field
136 is only used if SECTION_TYPE is LTO_section_function_body or
137 LTO_static_initializer. For all others it is ignored. The callee
138 of this function is responcible to free the returned name. */
140 char *
141 lto_get_section_name (int section_type, const char *name)
143 switch (section_type)
145 case LTO_section_function_body:
146 gcc_assert (name != NULL);
147 if (name[0] == '*')
148 name++;
149 return concat (LTO_SECTION_NAME_PREFIX, name, NULL);
151 case LTO_section_static_initializer:
152 return concat (LTO_SECTION_NAME_PREFIX, ".statics", NULL);
154 case LTO_section_symtab:
155 return concat (LTO_SECTION_NAME_PREFIX, ".symtab", NULL);
157 case LTO_section_decls:
158 return concat (LTO_SECTION_NAME_PREFIX, ".decls", NULL);
160 case LTO_section_cgraph:
161 return concat (LTO_SECTION_NAME_PREFIX, ".cgraph", NULL);
163 case LTO_section_varpool:
164 return concat (LTO_SECTION_NAME_PREFIX, ".vars", NULL);
166 case LTO_section_refs:
167 return concat (LTO_SECTION_NAME_PREFIX, ".refs", NULL);
169 case LTO_section_jump_functions:
170 return concat (LTO_SECTION_NAME_PREFIX, ".jmpfuncs", NULL);
172 case LTO_section_ipa_pure_const:
173 return concat (LTO_SECTION_NAME_PREFIX, ".pureconst", NULL);
175 case LTO_section_ipa_reference:
176 return concat (LTO_SECTION_NAME_PREFIX, ".reference", NULL);
178 case LTO_section_opts:
179 return concat (LTO_SECTION_NAME_PREFIX, ".opts", NULL);
181 case LTO_section_cgraph_opt_sum:
182 return concat (LTO_SECTION_NAME_PREFIX, ".cgraphopt", NULL);
184 default:
185 internal_error ("bytecode stream: unexpected LTO section %s", name);
190 /* Show various memory usage statistics related to LTO. */
192 void
193 print_lto_report (void)
195 const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
196 unsigned i;
198 fprintf (stderr, "%s statistics\n", s);
199 fprintf (stderr, "[%s] # of input files: "
200 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);
202 fprintf (stderr, "[%s] # of input cgraph nodes: "
203 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
204 lto_stats.num_input_cgraph_nodes);
206 fprintf (stderr, "[%s] # of function bodies: "
207 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
208 lto_stats.num_function_bodies);
210 fprintf (stderr, "[%s] ", s);
211 print_gimple_types_stats ();
213 for (i = 0; i < NUM_TREE_CODES; i++)
214 if (lto_stats.num_trees[i])
215 fprintf (stderr, "[%s] # of '%s' objects read: "
216 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
217 tree_code_name[i], lto_stats.num_trees[i]);
219 if (flag_lto)
221 fprintf (stderr, "[%s] Compression: "
222 HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, "
223 HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s,
224 lto_stats.num_output_il_bytes,
225 lto_stats.num_compressed_il_bytes);
226 if (lto_stats.num_output_il_bytes > 0)
228 const float dividend = (float) lto_stats.num_compressed_il_bytes;
229 const float divisor = (float) lto_stats.num_output_il_bytes;
230 fprintf (stderr, " (ratio: %f)", dividend / divisor);
232 fprintf (stderr, "\n");
235 if (flag_wpa)
237 fprintf (stderr, "[%s] # of output files: "
238 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
239 lto_stats.num_output_files);
241 fprintf (stderr, "[%s] # of output cgraph nodes: "
242 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
243 lto_stats.num_output_cgraph_nodes);
245 fprintf (stderr, "[%s] # callgraph partitions: "
246 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
247 lto_stats.num_cgraph_partitions);
249 fprintf (stderr, "[%s] Compression: "
250 HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, "
251 HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s,
252 lto_stats.num_input_il_bytes,
253 lto_stats.num_uncompressed_il_bytes);
254 if (lto_stats.num_input_il_bytes > 0)
256 const float dividend = (float) lto_stats.num_uncompressed_il_bytes;
257 const float divisor = (float) lto_stats.num_input_il_bytes;
258 fprintf (stderr, " (ratio: %f)", dividend / divisor);
260 fprintf (stderr, "\n");
263 for (i = 0; i < LTO_N_SECTION_TYPES; i++)
264 fprintf (stderr, "[%s] Size of mmap'd section %s: "
265 HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s,
266 lto_section_name[i], lto_stats.section_size[i]);
270 /* Create a new bitpack. */
272 struct bitpack_d *
273 bitpack_create (void)
275 return XCNEW (struct bitpack_d);
279 /* Free the memory used by bitpack BP. */
281 void
282 bitpack_delete (struct bitpack_d *bp)
284 VEC_free (bitpack_word_t, heap, bp->values);
285 free (bp);
289 /* Return an index to the word in bitpack BP that contains the
290 next NBITS. */
292 static inline unsigned
293 bp_get_next_word (struct bitpack_d *bp, unsigned nbits)
295 unsigned last, ix;
297 /* In principle, the next word to use is determined by the
298 number of bits already processed in BP. */
299 ix = bp->num_bits / BITS_PER_BITPACK_WORD;
301 /* All the encoded bit patterns in BP are contiguous, therefore if
302 the next NBITS would straddle over two different words, move the
303 index to the next word and update the number of encoded bits
304 by adding up the hole of unused bits created by this move. */
305 bp->first_unused_bit %= BITS_PER_BITPACK_WORD;
306 last = bp->first_unused_bit + nbits - 1;
307 if (last >= BITS_PER_BITPACK_WORD)
309 ix++;
310 bp->num_bits += (BITS_PER_BITPACK_WORD - bp->first_unused_bit);
311 bp->first_unused_bit = 0;
314 return ix;
318 /* Pack NBITS of value VAL into bitpack BP. */
320 void
321 bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
323 unsigned ix;
324 bitpack_word_t word;
326 /* We cannot encode more bits than BITS_PER_BITPACK_WORD. */
327 gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);
329 /* Compute which word will contain the next NBITS. */
330 ix = bp_get_next_word (bp, nbits);
331 if (ix >= VEC_length (bitpack_word_t, bp->values))
333 /* If there is no room left in the last word of the values
334 array, add a new word. Additionally, we should only
335 need to add a single word, since every pack operation cannot
336 use more bits than fit in a single word. */
337 gcc_assert (ix < VEC_length (bitpack_word_t, bp->values) + 1);
338 VEC_safe_push (bitpack_word_t, heap, bp->values, 0);
341 /* Grab the last word to pack VAL into. */
342 word = VEC_index (bitpack_word_t, bp->values, ix);
344 /* To fit VAL in WORD, we need to shift VAL to the left to
345 skip the bottom BP->FIRST_UNUSED_BIT bits. */
346 gcc_assert (BITS_PER_BITPACK_WORD >= bp->first_unused_bit + nbits);
347 val <<= bp->first_unused_bit;
349 /* Update WORD with VAL. */
350 word |= val;
352 /* Update BP. */
353 VEC_replace (bitpack_word_t, bp->values, ix, word);
354 bp->num_bits += nbits;
355 bp->first_unused_bit += nbits;
359 /* Unpack the next NBITS from bitpack BP. */
361 bitpack_word_t
362 bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
364 bitpack_word_t val, word, mask;
365 unsigned ix;
367 /* We cannot decode more bits than BITS_PER_BITPACK_WORD. */
368 gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);
370 /* Compute which word contains the next NBITS. */
371 ix = bp_get_next_word (bp, nbits);
372 word = VEC_index (bitpack_word_t, bp->values, ix);
374 /* Compute the mask to get NBITS from WORD. */
375 mask = (nbits == BITS_PER_BITPACK_WORD)
376 ? (bitpack_word_t) -1
377 : ((bitpack_word_t) 1 << nbits) - 1;
379 /* Shift WORD to the right to skip over the bits already decoded
380 in word. */
381 word >>= bp->first_unused_bit;
383 /* Apply the mask to obtain the requested value. */
384 val = word & mask;
386 /* Update BP->NUM_BITS for the next unpack operation. */
387 bp->num_bits += nbits;
388 bp->first_unused_bit += nbits;
390 return val;
/* Check that all the TS_* structures handled by the lto_output_* and
   lto_input_* routines are exactly ALL the structures defined in
   treestruct.def.  */

static void
check_handled_ts_structures (void)
{
  bool handled_p[LAST_TS_ENUM];
  unsigned i;

  memset (&handled_p, 0, sizeof (handled_p));

  /* These are the TS_* structures that are either handled or
     explicitly ignored by the streamer routines.  */
  handled_p[TS_BASE] = true;
  handled_p[TS_COMMON] = true;
  handled_p[TS_INT_CST] = true;
  handled_p[TS_REAL_CST] = true;
  handled_p[TS_FIXED_CST] = true;
  handled_p[TS_VECTOR] = true;
  handled_p[TS_STRING] = true;
  handled_p[TS_COMPLEX] = true;
  handled_p[TS_IDENTIFIER] = true;
  handled_p[TS_DECL_MINIMAL] = true;
  handled_p[TS_DECL_COMMON] = true;
  handled_p[TS_DECL_WRTL] = true;
  handled_p[TS_DECL_NON_COMMON] = true;
  handled_p[TS_DECL_WITH_VIS] = true;
  handled_p[TS_FIELD_DECL] = true;
  handled_p[TS_VAR_DECL] = true;
  handled_p[TS_PARM_DECL] = true;
  handled_p[TS_LABEL_DECL] = true;
  handled_p[TS_RESULT_DECL] = true;
  handled_p[TS_CONST_DECL] = true;
  handled_p[TS_TYPE_DECL] = true;
  handled_p[TS_FUNCTION_DECL] = true;
  handled_p[TS_TYPE] = true;
  handled_p[TS_LIST] = true;
  handled_p[TS_VEC] = true;
  handled_p[TS_EXP] = true;
  handled_p[TS_SSA_NAME] = true;
  handled_p[TS_BLOCK] = true;
  handled_p[TS_BINFO] = true;
  handled_p[TS_STATEMENT_LIST] = true;
  handled_p[TS_CONSTRUCTOR] = true;
  handled_p[TS_OMP_CLAUSE] = true;
  handled_p[TS_OPTIMIZATION] = true;
  handled_p[TS_TARGET_OPTION] = true;

  /* Anything not marked above will trigger the following assertion.
     If this assertion triggers, it means that there is a new TS_*
     structure that should be handled by the streamer.  */
  for (i = 0; i < LAST_TS_ENUM; i++)
    gcc_assert (handled_p[i]);
}
451 /* Helper for lto_streamer_cache_insert_1. Add T to CACHE->NODES at
452 slot IX. Add OFFSET to CACHE->OFFSETS at slot IX. */
454 static void
455 lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d *cache,
456 int ix, tree t, unsigned offset)
458 gcc_assert (ix >= 0);
460 /* Grow the array of nodes and offsets to accomodate T at IX. */
461 if (ix >= (int) VEC_length (tree, cache->nodes))
463 size_t sz = ix + (20 + ix) / 4;
464 VEC_safe_grow_cleared (tree, heap, cache->nodes, sz);
465 VEC_safe_grow_cleared (unsigned, heap, cache->offsets, sz);
468 VEC_replace (tree, cache->nodes, ix, t);
469 VEC_replace (unsigned, cache->offsets, ix, offset);
/* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
   CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.

   If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
   slot in the cache.  Otherwise, T is inserted at the position indicated
   in *IX_P.

   If T already existed in CACHE, return true.  Otherwise,
   return false.  */

static bool
lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
			     tree t, int *ix_p, unsigned *offset_p,
			     bool insert_at_next_slot_p)
{
  void **slot;
  struct tree_int_map d_entry, *entry;
  int ix;
  unsigned offset;
  bool existed_p;

  gcc_assert (t);

  /* Look T up in the tree -> slot-index map, creating the slot if
     it is not there yet.  */
  d_entry.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_entry, INSERT);
  if (*slot == NULL)
    {
      /* T was not in the cache.  Determine the next slot to use.  */
      if (insert_at_next_slot_p)
	ix = cache->next_slot++;
      else
	ix = *ix_p;

      /* Record the T -> IX mapping in the hash table.  Entries are
	 pool-allocated; they are released with the cache itself.  */
      entry = (struct tree_int_map *)pool_alloc (cache->node_map_entries);
      entry->base.from = t;
      entry->to = (unsigned) ix;
      *slot = entry;

      /* If no offset was given, store the invalid offset -1.  */
      offset = (offset_p) ? *offset_p : (unsigned) -1;

      lto_streamer_cache_add_to_node_array (cache, ix, t, offset);

      /* Indicate that the item was not present in the cache.  */
      existed_p = false;
    }
  else
    {
      /* T is already cached; recover its slot and recorded offset.  */
      entry = (struct tree_int_map *) *slot;
      ix = (int) entry->to;
      offset = VEC_index (unsigned, cache->offsets, ix);

      if (!insert_at_next_slot_p && ix != *ix_p)
	{
	  /* If the caller wants to insert T at a specific slot
	     location, and ENTRY->TO does not match *IX_P, add T to
	     the requested location slot.  This situation arises when
	     streaming builtin functions.

	     For instance, on the writer side we could have two
	     FUNCTION_DECLS T1 and T2 that are represented by the same
	     builtin function.  The reader will only instantiate the
	     canonical builtin, but since T1 and T2 had been
	     originally stored in different cache slots (S1 and S2),
	     the reader must be able to find the canonical builtin
	     function at slots S1 and S2.  */
	  gcc_assert (lto_stream_as_builtin_p (t));
	  ix = *ix_p;

	  /* Since we are storing a builtin, the offset into the
	     stream is not necessary as we will not need to read
	     forward in the stream.  */
	  lto_streamer_cache_add_to_node_array (cache, ix, t, -1);
	}

      /* Indicate that T was already in the cache.  */
      existed_p = true;
    }

  /* Report the slot and offset back to the caller, if requested.  */
  if (ix_p)
    *ix_p = ix;

  if (offset_p)
    *offset_p = offset;

  return existed_p;
}
562 /* Insert tree node T in CACHE. If T already existed in the cache
563 return true. Otherwise, return false.
565 If IX_P is non-null, update it with the index into the cache where
566 T has been stored.
568 *OFFSET_P represents the offset in the stream where T is physically
569 written out. The first time T is added to the cache, *OFFSET_P is
570 recorded in the cache together with T. But if T already existed
571 in the cache, *OFFSET_P is updated with the value that was recorded
572 the first time T was added to the cache.
574 If OFFSET_P is NULL, it is ignored. */
576 bool
577 lto_streamer_cache_insert (struct lto_streamer_cache_d *cache, tree t,
578 int *ix_p, unsigned *offset_p)
580 return lto_streamer_cache_insert_1 (cache, t, ix_p, offset_p, true);
584 /* Insert tree node T in CACHE at slot IX. If T already
585 existed in the cache return true. Otherwise, return false. */
587 bool
588 lto_streamer_cache_insert_at (struct lto_streamer_cache_d *cache,
589 tree t, int ix)
591 return lto_streamer_cache_insert_1 (cache, t, &ix, NULL, false);
595 /* Return true if tree node T exists in CACHE. If IX_P is
596 not NULL, write to *IX_P the index into the cache where T is stored
597 (-1 if T is not found). */
599 bool
600 lto_streamer_cache_lookup (struct lto_streamer_cache_d *cache, tree t,
601 int *ix_p)
603 void **slot;
604 struct tree_int_map d_slot;
605 bool retval;
606 int ix;
608 gcc_assert (t);
610 d_slot.base.from = t;
611 slot = htab_find_slot (cache->node_map, &d_slot, NO_INSERT);
612 if (slot == NULL)
614 retval = false;
615 ix = -1;
617 else
619 retval = true;
620 ix = (int) ((struct tree_int_map *) *slot)->to;
623 if (ix_p)
624 *ix_p = ix;
626 return retval;
630 /* Return the tree node at slot IX in CACHE. */
632 tree
633 lto_streamer_cache_get (struct lto_streamer_cache_d *cache, int ix)
635 gcc_assert (cache);
637 /* If the reader is requesting an index beyond the length of the
638 cache, it will need to read ahead. Return NULL_TREE to indicate
639 that. */
640 if ((unsigned) ix >= VEC_length (tree, cache->nodes))
641 return NULL_TREE;
643 return VEC_index (tree, cache->nodes, (unsigned) ix);
647 /* Record NODE in COMMON_NODES if it is not NULL and is not already in
648 SEEN_NODES. */
650 static void
651 lto_record_common_node (tree *nodep, VEC(tree, heap) **common_nodes,
652 struct pointer_set_t *seen_nodes)
654 tree node = *nodep;
656 if (node == NULL_TREE)
657 return;
659 if (TYPE_P (node))
660 *nodep = node = gimple_register_type (node);
662 /* Return if node is already seen. */
663 if (pointer_set_insert (seen_nodes, node))
664 return;
666 VEC_safe_push (tree, heap, *common_nodes, node);
668 if (tree_node_can_be_shared (node))
670 if (POINTER_TYPE_P (node)
671 || TREE_CODE (node) == COMPLEX_TYPE
672 || TREE_CODE (node) == ARRAY_TYPE)
673 lto_record_common_node (&TREE_TYPE (node), common_nodes, seen_nodes);
678 /* Generate a vector of common nodes and make sure they are merged
679 properly according to the the gimple type table. */
681 static VEC(tree,heap) *
682 lto_get_common_nodes (void)
684 unsigned i;
685 VEC(tree,heap) *common_nodes = NULL;
686 struct pointer_set_t *seen_nodes;
688 /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
689 LTO back-end must agree. Currently, the only languages that set this
690 use the name "main". */
691 if (main_identifier_node)
693 const char *main_name = IDENTIFIER_POINTER (main_identifier_node);
694 gcc_assert (strcmp (main_name, "main") == 0);
696 else
697 main_identifier_node = get_identifier ("main");
699 gcc_assert (ptrdiff_type_node == integer_type_node);
701 /* FIXME lto. In the C++ front-end, fileptr_type_node is defined as a
702 variant copy of of ptr_type_node, rather than ptr_node itself. The
703 distinction should only be relevant to the front-end, so we always
704 use the C definition here in lto1.
706 These should be assured in pass_ipa_free_lang_data. */
707 gcc_assert (fileptr_type_node == ptr_type_node);
708 gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);
710 seen_nodes = pointer_set_create ();
712 /* Skip itk_char. char_type_node is shared with the appropriately
713 signed variant. */
714 for (i = itk_signed_char; i < itk_none; i++)
715 lto_record_common_node (&integer_types[i], &common_nodes, seen_nodes);
717 for (i = 0; i < TYPE_KIND_LAST; i++)
718 lto_record_common_node (&sizetype_tab[i], &common_nodes, seen_nodes);
720 for (i = 0; i < TI_MAX; i++)
721 lto_record_common_node (&global_trees[i], &common_nodes, seen_nodes);
723 pointer_set_destroy (seen_nodes);
725 return common_nodes;
729 /* Assign an index to tree node T and enter it in the streamer cache
730 CACHE. */
732 static void
733 preload_common_node (struct lto_streamer_cache_d *cache, tree t)
735 gcc_assert (t);
737 lto_streamer_cache_insert (cache, t, NULL, NULL);
739 /* The FIELD_DECLs of structures should be shared, so that every
740 COMPONENT_REF uses the same tree node when referencing a field.
741 Pointer equality between FIELD_DECLs is used by the alias
742 machinery to compute overlapping memory references (See
743 nonoverlapping_component_refs_p). */
744 if (TREE_CODE (t) == RECORD_TYPE)
746 tree f;
748 for (f = TYPE_FIELDS (t); f; f = TREE_CHAIN (f))
749 preload_common_node (cache, f);
754 /* Create a cache of pickled nodes. */
756 struct lto_streamer_cache_d *
757 lto_streamer_cache_create (void)
759 struct lto_streamer_cache_d *cache;
760 VEC(tree, heap) *common_nodes;
761 unsigned i;
762 tree node;
764 cache = XCNEW (struct lto_streamer_cache_d);
766 cache->node_map = htab_create (101, tree_int_map_hash, tree_int_map_eq, NULL);
768 cache->node_map_entries = create_alloc_pool ("node map",
769 sizeof (struct tree_int_map),
770 100);
772 /* Load all the well-known tree nodes that are always created by
773 the compiler on startup. This prevents writing them out
774 unnecessarily. */
775 common_nodes = lto_get_common_nodes ();
777 for (i = 0; VEC_iterate (tree, common_nodes, i, node); i++)
778 preload_common_node (cache, node);
780 VEC_free(tree, heap, common_nodes);
782 return cache;
786 /* Delete the streamer cache C. */
788 void
789 lto_streamer_cache_delete (struct lto_streamer_cache_d *c)
791 if (c == NULL)
792 return;
794 htab_delete (c->node_map);
795 free_alloc_pool (c->node_map_entries);
796 VEC_free (tree, heap, c->nodes);
797 VEC_free (unsigned, heap, c->offsets);
798 free (c);
#ifdef LTO_STREAMER_DEBUG
/* Table mapping reconstructed trees to the address each one had on
   the writer side (see lto_orig_address_map below).  */
static htab_t tree_htab;

struct tree_hash_entry
{
  tree key;
  intptr_t value;
};

/* Hash function for TREE_HTAB: hash on the tree pointer itself.  */

static hashval_t
hash_tree (const void *p)
{
  const struct tree_hash_entry *entry = (const struct tree_hash_entry *) p;
  return htab_hash_pointer (entry->key);
}

/* Equality function for TREE_HTAB: two entries match when they refer
   to the same tree node.  */

static int
eq_tree (const void *p1, const void *p2)
{
  const struct tree_hash_entry *entry1 = (const struct tree_hash_entry *) p1;
  const struct tree_hash_entry *entry2 = (const struct tree_hash_entry *) p2;
  return (entry1->key == entry2->key);
}
#endif
/* Initialization common to the LTO reader and writer.  */

void
lto_streamer_init (void)
{
  /* Check that all the TS_* handled by the reader and writer routines
     match exactly the structures defined in treestruct.def.  When a
     new TS_* structure is added, the streamer should be updated to
     handle it.  */
  check_handled_ts_structures ();

#ifdef LTO_STREAMER_DEBUG
  tree_htab = htab_create (31, hash_tree, eq_tree, NULL);
#endif
}
844 /* Gate function for all LTO streaming passes. */
846 bool
847 gate_lto_out (void)
849 return ((flag_generate_lto || in_lto_p)
850 /* Don't bother doing anything if the program has errors. */
851 && !seen_error ());
#ifdef LTO_STREAMER_DEBUG
/* Add a mapping between T and ORIG_T, which is the numeric value of
   the original address of T as it was seen by the LTO writer.  This
   mapping is useful when debugging streaming problems.  A debugging
   session can be started on both reader and writer using ORIG_T
   as a breakpoint value in both sessions.

   Note that this mapping is transient and only valid while T is
   being reconstructed.  Once T is fully built, the mapping is
   removed.  */

void
lto_orig_address_map (tree t, intptr_t orig_t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  ent.value = orig_t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, INSERT);
  /* T must not already be mapped.  */
  gcc_assert (!*slot);
  *slot = XNEW (struct tree_hash_entry);
  **slot = ent;
}


/* Get the original address of T as it was seen by the writer.  This
   is only valid while T is being reconstructed.  Returns 0 when no
   mapping exists.  */

intptr_t
lto_orig_address_get (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  return (slot ? (*slot)->value : 0);
}


/* Clear the mapping of T to its original address.  T must have been
   mapped previously.  */

void
lto_orig_address_remove (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  gcc_assert (slot);
  free (*slot);
  htab_clear_slot (tree_htab, (PTR *) slot);
}
#endif
916 /* Check that the version MAJOR.MINOR is the correct version number. */
918 void
919 lto_check_version (int major, int minor)
921 if (major != LTO_major_version || minor != LTO_minor_version)
922 fatal_error ("bytecode stream generated with LTO version %d.%d instead "
923 "of the expected %d.%d",
924 major, minor,
925 LTO_major_version, LTO_minor_version);