Support AVX for cmpss/cmpsd.
[official-gcc.git] / gcc / lto-streamer.c
blob153d2a786083757ade8e4fce3b236ca7033407a8
1 /* Miscellaneous utilities for GIMPLE streaming. Things that are used
2 in both input and output are here.
4 Copyright 2009, 2010 Free Software Foundation, Inc.
5 Contributed by Doug Kwan <dougkwan@google.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28 #include "flags.h"
29 #include "tree.h"
30 #include "gimple.h"
31 #include "tree-flow.h"
32 #include "diagnostic.h"
33 #include "bitmap.h"
34 #include "vec.h"
35 #include "lto-streamer.h"
37 /* Statistics gathered during LTO, WPA and LTRANS. */
38 struct lto_stats_d lto_stats;
40 /* LTO uses bitmaps with different life-times. So use a seperate
41 obstack for all LTO bitmaps. */
42 static bitmap_obstack lto_obstack;
43 static bool lto_obstack_initialized;
46 /* Return a string representing LTO tag TAG. */
48 const char *
49 lto_tag_name (enum LTO_tags tag)
51 if (lto_tag_is_tree_code_p (tag))
53 /* For tags representing tree nodes, return the name of the
54 associated tree code. */
55 return tree_code_name[lto_tag_to_tree_code (tag)];
58 if (lto_tag_is_gimple_code_p (tag))
60 /* For tags representing gimple statements, return the name of
61 the associated gimple code. */
62 return gimple_code_name[lto_tag_to_gimple_code (tag)];
65 switch (tag)
67 case LTO_null:
68 return "LTO_null";
69 case LTO_bb0:
70 return "LTO_bb0";
71 case LTO_bb1:
72 return "LTO_bb1";
73 case LTO_eh_region:
74 return "LTO_eh_region";
75 case LTO_function:
76 return "LTO_function";
77 case LTO_eh_table:
78 return "LTO_eh_table";
79 case LTO_ert_cleanup:
80 return "LTO_ert_cleanup";
81 case LTO_ert_try:
82 return "LTO_ert_try";
83 case LTO_ert_allowed_exceptions:
84 return "LTO_ert_allowed_exceptions";
85 case LTO_ert_must_not_throw:
86 return "LTO_ert_must_not_throw";
87 case LTO_tree_pickle_reference:
88 return "LTO_tree_pickle_reference";
89 case LTO_field_decl_ref:
90 return "LTO_field_decl_ref";
91 case LTO_function_decl_ref:
92 return "LTO_function_decl_ref";
93 case LTO_label_decl_ref:
94 return "LTO_label_decl_ref";
95 case LTO_namespace_decl_ref:
96 return "LTO_namespace_decl_ref";
97 case LTO_result_decl_ref:
98 return "LTO_result_decl_ref";
99 case LTO_ssa_name_ref:
100 return "LTO_ssa_name_ref";
101 case LTO_type_decl_ref:
102 return "LTO_type_decl_ref";
103 case LTO_type_ref:
104 return "LTO_type_ref";
105 case LTO_global_decl_ref:
106 return "LTO_global_decl_ref";
107 default:
108 return "LTO_UNKNOWN";
113 /* Allocate a bitmap from heap. Initializes the LTO obstack if necessary. */
115 bitmap
116 lto_bitmap_alloc (void)
118 if (!lto_obstack_initialized)
120 bitmap_obstack_initialize (&lto_obstack);
121 lto_obstack_initialized = true;
123 return BITMAP_ALLOC (&lto_obstack);
126 /* Free bitmap B. */
128 void
129 lto_bitmap_free (bitmap b)
131 BITMAP_FREE (b);
135 /* Get a section name for a particular type or name. The NAME field
136 is only used if SECTION_TYPE is LTO_section_function_body or
137 LTO_static_initializer. For all others it is ignored. The callee
138 of this function is responcible to free the returned name. */
140 char *
141 lto_get_section_name (int section_type, const char *name)
143 switch (section_type)
145 case LTO_section_function_body:
146 gcc_assert (name != NULL);
147 if (name[0] == '*')
148 name++;
149 return concat (LTO_SECTION_NAME_PREFIX, name, NULL);
151 case LTO_section_static_initializer:
152 return concat (LTO_SECTION_NAME_PREFIX, ".statics", NULL);
154 case LTO_section_symtab:
155 return concat (LTO_SECTION_NAME_PREFIX, ".symtab", NULL);
157 case LTO_section_decls:
158 return concat (LTO_SECTION_NAME_PREFIX, ".decls", NULL);
160 case LTO_section_cgraph:
161 return concat (LTO_SECTION_NAME_PREFIX, ".cgraph", NULL);
163 case LTO_section_varpool:
164 return concat (LTO_SECTION_NAME_PREFIX, ".vars", NULL);
166 case LTO_section_refs:
167 return concat (LTO_SECTION_NAME_PREFIX, ".refs", NULL);
169 case LTO_section_jump_functions:
170 return concat (LTO_SECTION_NAME_PREFIX, ".jmpfuncs", NULL);
172 case LTO_section_ipa_pure_const:
173 return concat (LTO_SECTION_NAME_PREFIX, ".pureconst", NULL);
175 case LTO_section_ipa_reference:
176 return concat (LTO_SECTION_NAME_PREFIX, ".reference", NULL);
178 case LTO_section_opts:
179 return concat (LTO_SECTION_NAME_PREFIX, ".opts", NULL);
181 default:
182 internal_error ("bytecode stream: unexpected LTO section %s", name);
187 /* Show various memory usage statistics related to LTO. */
189 void
190 print_lto_report (void)
192 const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
193 unsigned i;
195 fprintf (stderr, "%s statistics\n", s);
196 fprintf (stderr, "[%s] # of input files: "
197 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);
199 fprintf (stderr, "[%s] # of input cgraph nodes: "
200 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
201 lto_stats.num_input_cgraph_nodes);
203 fprintf (stderr, "[%s] # of function bodies: "
204 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
205 lto_stats.num_function_bodies);
207 fprintf (stderr, "[%s] ", s);
208 print_gimple_types_stats ();
210 for (i = 0; i < NUM_TREE_CODES; i++)
211 if (lto_stats.num_trees[i])
212 fprintf (stderr, "[%s] # of '%s' objects read: "
213 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
214 tree_code_name[i], lto_stats.num_trees[i]);
216 if (flag_lto)
218 fprintf (stderr, "[%s] Compression: "
219 HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, "
220 HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s,
221 lto_stats.num_output_il_bytes,
222 lto_stats.num_compressed_il_bytes);
223 if (lto_stats.num_output_il_bytes > 0)
225 const float dividend = (float) lto_stats.num_compressed_il_bytes;
226 const float divisor = (float) lto_stats.num_output_il_bytes;
227 fprintf (stderr, " (ratio: %f)", dividend / divisor);
229 fprintf (stderr, "\n");
232 if (flag_wpa)
234 fprintf (stderr, "[%s] # of output files: "
235 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
236 lto_stats.num_output_files);
238 fprintf (stderr, "[%s] # of output cgraph nodes: "
239 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
240 lto_stats.num_output_cgraph_nodes);
242 fprintf (stderr, "[%s] # callgraph partitions: "
243 HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
244 lto_stats.num_cgraph_partitions);
246 fprintf (stderr, "[%s] Compression: "
247 HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, "
248 HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s,
249 lto_stats.num_input_il_bytes,
250 lto_stats.num_uncompressed_il_bytes);
251 if (lto_stats.num_input_il_bytes > 0)
253 const float dividend = (float) lto_stats.num_uncompressed_il_bytes;
254 const float divisor = (float) lto_stats.num_input_il_bytes;
255 fprintf (stderr, " (ratio: %f)", dividend / divisor);
257 fprintf (stderr, "\n");
260 for (i = 0; i < LTO_N_SECTION_TYPES; i++)
261 fprintf (stderr, "[%s] Size of mmap'd section %s: "
262 HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s,
263 lto_section_name[i], lto_stats.section_size[i]);
267 /* Create a new bitpack. */
269 struct bitpack_d *
270 bitpack_create (void)
272 return XCNEW (struct bitpack_d);
276 /* Free the memory used by bitpack BP. */
278 void
279 bitpack_delete (struct bitpack_d *bp)
281 VEC_free (bitpack_word_t, heap, bp->values);
282 free (bp);
286 /* Return an index to the word in bitpack BP that contains the
287 next NBITS. */
289 static inline unsigned
290 bp_get_next_word (struct bitpack_d *bp, unsigned nbits)
292 unsigned last, ix;
294 /* In principle, the next word to use is determined by the
295 number of bits already processed in BP. */
296 ix = bp->num_bits / BITS_PER_BITPACK_WORD;
298 /* All the encoded bit patterns in BP are contiguous, therefore if
299 the next NBITS would straddle over two different words, move the
300 index to the next word and update the number of encoded bits
301 by adding up the hole of unused bits created by this move. */
302 bp->first_unused_bit %= BITS_PER_BITPACK_WORD;
303 last = bp->first_unused_bit + nbits - 1;
304 if (last >= BITS_PER_BITPACK_WORD)
306 ix++;
307 bp->num_bits += (BITS_PER_BITPACK_WORD - bp->first_unused_bit);
308 bp->first_unused_bit = 0;
311 return ix;
315 /* Pack NBITS of value VAL into bitpack BP. */
317 void
318 bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
320 unsigned ix;
321 bitpack_word_t word;
323 /* We cannot encode more bits than BITS_PER_BITPACK_WORD. */
324 gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);
326 /* Compute which word will contain the next NBITS. */
327 ix = bp_get_next_word (bp, nbits);
328 if (ix >= VEC_length (bitpack_word_t, bp->values))
330 /* If there is no room left in the last word of the values
331 array, add a new word. Additionally, we should only
332 need to add a single word, since every pack operation cannot
333 use more bits than fit in a single word. */
334 gcc_assert (ix < VEC_length (bitpack_word_t, bp->values) + 1);
335 VEC_safe_push (bitpack_word_t, heap, bp->values, 0);
338 /* Grab the last word to pack VAL into. */
339 word = VEC_index (bitpack_word_t, bp->values, ix);
341 /* To fit VAL in WORD, we need to shift VAL to the left to
342 skip the bottom BP->FIRST_UNUSED_BIT bits. */
343 gcc_assert (BITS_PER_BITPACK_WORD >= bp->first_unused_bit + nbits);
344 val <<= bp->first_unused_bit;
346 /* Update WORD with VAL. */
347 word |= val;
349 /* Update BP. */
350 VEC_replace (bitpack_word_t, bp->values, ix, word);
351 bp->num_bits += nbits;
352 bp->first_unused_bit += nbits;
356 /* Unpack the next NBITS from bitpack BP. */
358 bitpack_word_t
359 bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
361 bitpack_word_t val, word, mask;
362 unsigned ix;
364 /* We cannot decode more bits than BITS_PER_BITPACK_WORD. */
365 gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);
367 /* Compute which word contains the next NBITS. */
368 ix = bp_get_next_word (bp, nbits);
369 word = VEC_index (bitpack_word_t, bp->values, ix);
371 /* Compute the mask to get NBITS from WORD. */
372 mask = (nbits == BITS_PER_BITPACK_WORD)
373 ? (bitpack_word_t) -1
374 : ((bitpack_word_t) 1 << nbits) - 1;
376 /* Shift WORD to the right to skip over the bits already decoded
377 in word. */
378 word >>= bp->first_unused_bit;
380 /* Apply the mask to obtain the requested value. */
381 val = word & mask;
383 /* Update BP->NUM_BITS for the next unpack operation. */
384 bp->num_bits += nbits;
385 bp->first_unused_bit += nbits;
387 return val;
391 /* Check that all the TS_* structures handled by the lto_output_* and
392 lto_input_* routines are exactly ALL the structures defined in
393 treestruct.def. */
395 static void
396 check_handled_ts_structures (void)
398 bool handled_p[LAST_TS_ENUM];
399 unsigned i;
401 memset (&handled_p, 0, sizeof (handled_p));
403 /* These are the TS_* structures that are either handled or
404 explicitly ignored by the streamer routines. */
405 handled_p[TS_BASE] = true;
406 handled_p[TS_COMMON] = true;
407 handled_p[TS_INT_CST] = true;
408 handled_p[TS_REAL_CST] = true;
409 handled_p[TS_FIXED_CST] = true;
410 handled_p[TS_VECTOR] = true;
411 handled_p[TS_STRING] = true;
412 handled_p[TS_COMPLEX] = true;
413 handled_p[TS_IDENTIFIER] = true;
414 handled_p[TS_DECL_MINIMAL] = true;
415 handled_p[TS_DECL_COMMON] = true;
416 handled_p[TS_DECL_WRTL] = true;
417 handled_p[TS_DECL_NON_COMMON] = true;
418 handled_p[TS_DECL_WITH_VIS] = true;
419 handled_p[TS_FIELD_DECL] = true;
420 handled_p[TS_VAR_DECL] = true;
421 handled_p[TS_PARM_DECL] = true;
422 handled_p[TS_LABEL_DECL] = true;
423 handled_p[TS_RESULT_DECL] = true;
424 handled_p[TS_CONST_DECL] = true;
425 handled_p[TS_TYPE_DECL] = true;
426 handled_p[TS_FUNCTION_DECL] = true;
427 handled_p[TS_TYPE] = true;
428 handled_p[TS_LIST] = true;
429 handled_p[TS_VEC] = true;
430 handled_p[TS_EXP] = true;
431 handled_p[TS_SSA_NAME] = true;
432 handled_p[TS_BLOCK] = true;
433 handled_p[TS_BINFO] = true;
434 handled_p[TS_STATEMENT_LIST] = true;
435 handled_p[TS_CONSTRUCTOR] = true;
436 handled_p[TS_OMP_CLAUSE] = true;
437 handled_p[TS_OPTIMIZATION] = true;
438 handled_p[TS_TARGET_OPTION] = true;
440 /* Anything not marked above will trigger the following assertion.
441 If this assertion triggers, it means that there is a new TS_*
442 structure that should be handled by the streamer. */
443 for (i = 0; i < LAST_TS_ENUM; i++)
444 gcc_assert (handled_p[i]);
448 /* Helper for lto_streamer_cache_insert_1. Add T to CACHE->NODES at
449 slot IX. Add OFFSET to CACHE->OFFSETS at slot IX. */
451 static void
452 lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d *cache,
453 int ix, tree t, unsigned offset)
455 gcc_assert (ix >= 0);
457 /* Grow the array of nodes and offsets to accomodate T at IX. */
458 if (ix >= (int) VEC_length (tree, cache->nodes))
460 size_t sz = ix + (20 + ix) / 4;
461 VEC_safe_grow_cleared (tree, heap, cache->nodes, sz);
462 VEC_safe_grow_cleared (unsigned, heap, cache->offsets, sz);
465 VEC_replace (tree, cache->nodes, ix, t);
466 VEC_replace (unsigned, cache->offsets, ix, offset);
470 /* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
471 CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.
473 If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
474 slot in the cache. Otherwise, T is inserted at the position indicated
475 in *IX_P.
477 If T already existed in CACHE, return true. Otherwise,
478 return false. */
480 static bool
481 lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
482 tree t, int *ix_p, unsigned *offset_p,
483 bool insert_at_next_slot_p)
485 void **slot;
486 struct tree_int_map d_entry, *entry;
487 int ix;
488 unsigned offset;
489 bool existed_p;
491 gcc_assert (t);
493 d_entry.base.from = t;
494 slot = htab_find_slot (cache->node_map, &d_entry, INSERT);
495 if (*slot == NULL)
497 /* Determine the next slot to use in the cache. */
498 if (insert_at_next_slot_p)
499 ix = cache->next_slot++;
500 else
501 ix = *ix_p;
503 entry = (struct tree_int_map *)pool_alloc (cache->node_map_entries);
504 entry->base.from = t;
505 entry->to = (unsigned) ix;
506 *slot = entry;
508 /* If no offset was given, store the invalid offset -1. */
509 offset = (offset_p) ? *offset_p : (unsigned) -1;
511 lto_streamer_cache_add_to_node_array (cache, ix, t, offset);
513 /* Indicate that the item was not present in the cache. */
514 existed_p = false;
516 else
518 entry = (struct tree_int_map *) *slot;
519 ix = (int) entry->to;
520 offset = VEC_index (unsigned, cache->offsets, ix);
522 if (!insert_at_next_slot_p && ix != *ix_p)
524 /* If the caller wants to insert T at a specific slot
525 location, and ENTRY->TO does not match *IX_P, add T to
526 the requested location slot. This situation arises when
527 streaming builtin functions.
529 For instance, on the writer side we could have two
530 FUNCTION_DECLS T1 and T2 that are represented by the same
531 builtin function. The reader will only instantiate the
532 canonical builtin, but since T1 and T2 had been
533 originally stored in different cache slots (S1 and S2),
534 the reader must be able to find the canonical builtin
535 function at slots S1 and S2. */
536 gcc_assert (lto_stream_as_builtin_p (t));
537 ix = *ix_p;
539 /* Since we are storing a builtin, the offset into the
540 stream is not necessary as we will not need to read
541 forward in the stream. */
542 lto_streamer_cache_add_to_node_array (cache, ix, t, -1);
545 /* Indicate that T was already in the cache. */
546 existed_p = true;
549 if (ix_p)
550 *ix_p = ix;
552 if (offset_p)
553 *offset_p = offset;
555 return existed_p;
559 /* Insert tree node T in CACHE. If T already existed in the cache
560 return true. Otherwise, return false.
562 If IX_P is non-null, update it with the index into the cache where
563 T has been stored.
565 *OFFSET_P represents the offset in the stream where T is physically
566 written out. The first time T is added to the cache, *OFFSET_P is
567 recorded in the cache together with T. But if T already existed
568 in the cache, *OFFSET_P is updated with the value that was recorded
569 the first time T was added to the cache.
571 If OFFSET_P is NULL, it is ignored. */
573 bool
574 lto_streamer_cache_insert (struct lto_streamer_cache_d *cache, tree t,
575 int *ix_p, unsigned *offset_p)
577 return lto_streamer_cache_insert_1 (cache, t, ix_p, offset_p, true);
581 /* Insert tree node T in CACHE at slot IX. If T already
582 existed in the cache return true. Otherwise, return false. */
584 bool
585 lto_streamer_cache_insert_at (struct lto_streamer_cache_d *cache,
586 tree t, int ix)
588 return lto_streamer_cache_insert_1 (cache, t, &ix, NULL, false);
592 /* Return true if tree node T exists in CACHE. If IX_P is
593 not NULL, write to *IX_P the index into the cache where T is stored
594 (-1 if T is not found). */
596 bool
597 lto_streamer_cache_lookup (struct lto_streamer_cache_d *cache, tree t,
598 int *ix_p)
600 void **slot;
601 struct tree_int_map d_slot;
602 bool retval;
603 int ix;
605 gcc_assert (t);
607 d_slot.base.from = t;
608 slot = htab_find_slot (cache->node_map, &d_slot, NO_INSERT);
609 if (slot == NULL)
611 retval = false;
612 ix = -1;
614 else
616 retval = true;
617 ix = (int) ((struct tree_int_map *) *slot)->to;
620 if (ix_p)
621 *ix_p = ix;
623 return retval;
627 /* Return the tree node at slot IX in CACHE. */
629 tree
630 lto_streamer_cache_get (struct lto_streamer_cache_d *cache, int ix)
632 gcc_assert (cache);
634 /* If the reader is requesting an index beyond the length of the
635 cache, it will need to read ahead. Return NULL_TREE to indicate
636 that. */
637 if ((unsigned) ix >= VEC_length (tree, cache->nodes))
638 return NULL_TREE;
640 return VEC_index (tree, cache->nodes, (unsigned) ix);
644 /* Record NODE in COMMON_NODES if it is not NULL and is not already in
645 SEEN_NODES. */
647 static void
648 lto_record_common_node (tree *nodep, VEC(tree, heap) **common_nodes,
649 struct pointer_set_t *seen_nodes)
651 tree node = *nodep;
653 if (node == NULL_TREE)
654 return;
656 if (TYPE_P (node))
657 *nodep = node = gimple_register_type (node);
659 /* Return if node is already seen. */
660 if (pointer_set_insert (seen_nodes, node))
661 return;
663 VEC_safe_push (tree, heap, *common_nodes, node);
665 if (tree_node_can_be_shared (node))
667 if (POINTER_TYPE_P (node)
668 || TREE_CODE (node) == COMPLEX_TYPE
669 || TREE_CODE (node) == ARRAY_TYPE)
670 lto_record_common_node (&TREE_TYPE (node), common_nodes, seen_nodes);
675 /* Generate a vector of common nodes and make sure they are merged
676 properly according to the the gimple type table. */
678 static VEC(tree,heap) *
679 lto_get_common_nodes (void)
681 unsigned i;
682 VEC(tree,heap) *common_nodes = NULL;
683 struct pointer_set_t *seen_nodes;
685 /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
686 LTO back-end must agree. Currently, the only languages that set this
687 use the name "main". */
688 if (main_identifier_node)
690 const char *main_name = IDENTIFIER_POINTER (main_identifier_node);
691 gcc_assert (strcmp (main_name, "main") == 0);
693 else
694 main_identifier_node = get_identifier ("main");
696 gcc_assert (ptrdiff_type_node == integer_type_node);
698 /* FIXME lto. In the C++ front-end, fileptr_type_node is defined as a
699 variant copy of of ptr_type_node, rather than ptr_node itself. The
700 distinction should only be relevant to the front-end, so we always
701 use the C definition here in lto1.
703 These should be assured in pass_ipa_free_lang_data. */
704 gcc_assert (fileptr_type_node == ptr_type_node);
705 gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);
707 seen_nodes = pointer_set_create ();
709 /* Skip itk_char. char_type_node is shared with the appropriately
710 signed variant. */
711 for (i = itk_signed_char; i < itk_none; i++)
712 lto_record_common_node (&integer_types[i], &common_nodes, seen_nodes);
714 for (i = 0; i < TYPE_KIND_LAST; i++)
715 lto_record_common_node (&sizetype_tab[i], &common_nodes, seen_nodes);
717 for (i = 0; i < TI_MAX; i++)
718 lto_record_common_node (&global_trees[i], &common_nodes, seen_nodes);
720 pointer_set_destroy (seen_nodes);
722 return common_nodes;
726 /* Assign an index to tree node T and enter it in the streamer cache
727 CACHE. */
729 static void
730 preload_common_node (struct lto_streamer_cache_d *cache, tree t)
732 gcc_assert (t);
734 lto_streamer_cache_insert (cache, t, NULL, NULL);
736 /* The FIELD_DECLs of structures should be shared, so that every
737 COMPONENT_REF uses the same tree node when referencing a field.
738 Pointer equality between FIELD_DECLs is used by the alias
739 machinery to compute overlapping memory references (See
740 nonoverlapping_component_refs_p). */
741 if (TREE_CODE (t) == RECORD_TYPE)
743 tree f;
745 for (f = TYPE_FIELDS (t); f; f = TREE_CHAIN (f))
746 preload_common_node (cache, f);
751 /* Create a cache of pickled nodes. */
753 struct lto_streamer_cache_d *
754 lto_streamer_cache_create (void)
756 struct lto_streamer_cache_d *cache;
757 VEC(tree, heap) *common_nodes;
758 unsigned i;
759 tree node;
761 cache = XCNEW (struct lto_streamer_cache_d);
763 cache->node_map = htab_create (101, tree_int_map_hash, tree_int_map_eq, NULL);
765 cache->node_map_entries = create_alloc_pool ("node map",
766 sizeof (struct tree_int_map),
767 100);
769 /* Load all the well-known tree nodes that are always created by
770 the compiler on startup. This prevents writing them out
771 unnecessarily. */
772 common_nodes = lto_get_common_nodes ();
774 for (i = 0; VEC_iterate (tree, common_nodes, i, node); i++)
775 preload_common_node (cache, node);
777 VEC_free(tree, heap, common_nodes);
779 return cache;
783 /* Delete the streamer cache C. */
785 void
786 lto_streamer_cache_delete (struct lto_streamer_cache_d *c)
788 if (c == NULL)
789 return;
791 htab_delete (c->node_map);
792 free_alloc_pool (c->node_map_entries);
793 VEC_free (tree, heap, c->nodes);
794 VEC_free (unsigned, heap, c->offsets);
795 free (c);
#ifdef LTO_STREAMER_DEBUG
/* Debug-only map from reconstructed trees to the addresses they had
   on the writer side (see lto_orig_address_map below).  */
static htab_t tree_htab;

struct tree_hash_entry
{
  tree key;
  intptr_t value;
};

/* Hash a tree_hash_entry by the pointer value of its KEY tree.  */

static hashval_t
hash_tree (const void *p)
{
  const struct tree_hash_entry *entry = (const struct tree_hash_entry *) p;
  return htab_hash_pointer (entry->key);
}

/* Return nonzero if the entries P1 and P2 refer to the same tree.  */

static int
eq_tree (const void *p1, const void *p2)
{
  const struct tree_hash_entry *e1 = (const struct tree_hash_entry *) p1;
  const struct tree_hash_entry *e2 = (const struct tree_hash_entry *) p2;
  return e1->key == e2->key;
}
#endif
/* Initialization common to the LTO reader and writer.  */

void
lto_streamer_init (void)
{
  /* Check that all the TS_* handled by the reader and writer routines
     match exactly the structures defined in treestruct.def.  When a
     new TS_* structure is added, the streamer should be updated to
     handle it.  */
  check_handled_ts_structures ();

#ifdef LTO_STREAMER_DEBUG
  tree_htab = htab_create (31, hash_tree, eq_tree, NULL);
#endif
}
841 /* Gate function for all LTO streaming passes. */
843 bool
844 gate_lto_out (void)
846 return ((flag_generate_lto || in_lto_p)
847 /* Don't bother doing anything if the program has errors. */
848 && !(errorcount || sorrycount));
#ifdef LTO_STREAMER_DEBUG
/* Add a mapping between T and ORIG_T, which is the numeric value of
   the original address of T as it was seen by the LTO writer.  This
   mapping is useful when debugging streaming problems.  A debugging
   session can be started on both reader and writer using ORIG_T
   as a breakpoint value in both sessions.

   Note that this mapping is transient and only valid while T is
   being reconstructed.  Once T is fully built, the mapping is
   removed.  */

void
lto_orig_address_map (tree t, intptr_t orig_t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  ent.value = orig_t;
  slot = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, INSERT);
  /* T must not already have a mapping.  */
  gcc_assert (!*slot);
  *slot = XNEW (struct tree_hash_entry);
  **slot = ent;
}


/* Get the original address of T as it was seen by the writer.  This
   is only valid while T is being reconstructed.  */

intptr_t
lto_orig_address_get (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  return slot ? (*slot)->value : 0;
}


/* Clear the mapping of T to its original address.  */

void
lto_orig_address_remove (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  /* T must have been mapped by lto_orig_address_map.  */
  gcc_assert (slot);
  free (*slot);
  htab_clear_slot (tree_htab, (PTR *) slot);
}
#endif
913 /* Check that the version MAJOR.MINOR is the correct version number. */
915 void
916 lto_check_version (int major, int minor)
918 if (major != LTO_major_version || minor != LTO_minor_version)
919 fatal_error ("bytecode stream generated with LTO version %d.%d instead "
920 "of the expected %d.%d",
921 major, minor,
922 LTO_major_version, LTO_minor_version);