Merge trunk version 217032 into gupc branch.
[official-gcc.git] / gcc / stor-layout.c
blob abdc5c171937253144184f19b45aaaed84c2c1d0
1 /* C-compiler utilities for types and variables storage layout
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "stor-layout.h"
27 #include "stringpool.h"
28 #include "varasm.h"
29 #include "print-tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "flags.h"
33 #include "hashtab.h"
34 #include "hash-set.h"
35 #include "vec.h"
36 #include "machmode.h"
37 #include "hard-reg-set.h"
38 #include "input.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "diagnostic-core.h"
42 #include "target.h"
43 #include "langhooks.h"
44 #include "regs.h"
45 #include "params.h"
46 #include "hash-map.h"
47 #include "is-a.h"
48 #include "plugin-api.h"
49 #include "ipa-ref.h"
50 #include "cgraph.h"
51 #include "tree-inline.h"
52 #include "tree-dump.h"
53 #include "gimplify.h"
55 /* Data type for the expressions representing sizes of data types.
56 It is the first integer type laid out. */
57 tree sizetype_tab[(int) stk_type_kind_last];
59 /* If nonzero, this is an upper limit on alignment of structure fields.
60 The value is measured in bits. */
61 unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
63 /* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
64 in the address spaces' address_mode, not pointer_mode. Set only by
65 internal_reference_types, which is called only by a front end. */
66 static int reference_types_internal = 0;
68 static tree self_referential_size (tree);
69 static void finalize_record_size (record_layout_info);
70 static void finalize_type_size (tree);
71 static void place_union_field (record_layout_info, tree);
72 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
73 static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
74 HOST_WIDE_INT, tree);
75 #endif
76 extern void debug_rli (record_layout_info);
78 /* Show that REFERENCE_TYPES are internal and should use address_mode.
79 Called only by front end. */
81 void
82 internal_reference_types (void)
84 reference_types_internal = 1;
87 /* Given a size SIZE that may not be a constant, return a SAVE_EXPR
88 to serve as the actual size-expression for a type or decl. */
90 tree
91 variable_size (tree size)
93 /* Obviously. */
94 if (TREE_CONSTANT (size))
95 return size;
97 /* If the size is self-referential, we can't make a SAVE_EXPR (see
98 save_expr for the rationale). But we can do something else. */
99 if (CONTAINS_PLACEHOLDER_P (size))
100 return self_referential_size (size);
102 /* If we are in the global binding level, we can't make a SAVE_EXPR
103 since it may end up being shared across functions, so it is up
104 to the front-end to deal with this case. */
105 if (lang_hooks.decls.global_bindings_p ())
106 return size;
108 return save_expr (size);
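/* Illustrative example, not part of the original sources: for a C99
   variable-length array such as `char a[n]', the front end computes a
   non-constant TYPE_SIZE along the lines of `n * BITS_PER_UNIT'.
   Passing that expression through variable_size wraps it in a
   SAVE_EXPR, so the size is evaluated exactly once even if it is used
   several times during layout; a constant size such as that of
   `char a[16]' is returned unchanged.  */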
111 /* An array of functions used for self-referential size computation. */
112 static GTY(()) vec<tree, va_gc> *size_functions;
114 /* Similar to copy_tree_r but do not copy component references involving
115 PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
116 and substituted in substitute_in_expr. */
118 static tree
119 copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
121 enum tree_code code = TREE_CODE (*tp);
123 /* Stop at types, decls, constants like copy_tree_r. */
124 if (TREE_CODE_CLASS (code) == tcc_type
125 || TREE_CODE_CLASS (code) == tcc_declaration
126 || TREE_CODE_CLASS (code) == tcc_constant)
128 *walk_subtrees = 0;
129 return NULL_TREE;
132 /* This is the pattern built in ada/make_aligning_type. */
133 else if (code == ADDR_EXPR
134 && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
136 *walk_subtrees = 0;
137 return NULL_TREE;
140 /* Default case: the component reference. */
141 else if (code == COMPONENT_REF)
143 tree inner;
144 for (inner = TREE_OPERAND (*tp, 0);
145 REFERENCE_CLASS_P (inner);
146 inner = TREE_OPERAND (inner, 0))
149 if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
151 *walk_subtrees = 0;
152 return NULL_TREE;
156 /* We're not supposed to have them in self-referential size trees
157 because we wouldn't properly control when they are evaluated.
158 However, not creating superfluous SAVE_EXPRs requires accurate
159 tracking of readonly-ness all the way down to here, which we
160 cannot always guarantee in practice. So punt in this case. */
161 else if (code == SAVE_EXPR)
162 return error_mark_node;
164 else if (code == STATEMENT_LIST)
165 gcc_unreachable ();
167 return copy_tree_r (tp, walk_subtrees, data);
170 /* Given a SIZE expression that is self-referential, return an equivalent
171 expression to serve as the actual size expression for a type. */
173 static tree
174 self_referential_size (tree size)
176 static unsigned HOST_WIDE_INT fnno = 0;
177 vec<tree> self_refs = vNULL;
178 tree param_type_list = NULL, param_decl_list = NULL;
179 tree t, ref, return_type, fntype, fnname, fndecl;
180 unsigned int i;
181 char buf[128];
182 vec<tree, va_gc> *args = NULL;
184 /* Do not factor out simple operations. */
185 t = skip_simple_constant_arithmetic (size);
186 if (TREE_CODE (t) == CALL_EXPR)
187 return size;
189 /* Collect the list of self-references in the expression. */
190 find_placeholder_in_expr (size, &self_refs);
191 gcc_assert (self_refs.length () > 0);
193 /* Obtain a private copy of the expression. */
194 t = size;
195 if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
196 return size;
197 size = t;
199 /* Build the parameter and argument lists in parallel; also
200 substitute the former for the latter in the expression. */
201 vec_alloc (args, self_refs.length ());
202 FOR_EACH_VEC_ELT (self_refs, i, ref)
204 tree subst, param_name, param_type, param_decl;
206 if (DECL_P (ref))
208 /* We shouldn't have true variables here. */
209 gcc_assert (TREE_READONLY (ref));
210 subst = ref;
212 /* This is the pattern built in ada/make_aligning_type. */
213 else if (TREE_CODE (ref) == ADDR_EXPR)
214 subst = ref;
215 /* Default case: the component reference. */
216 else
217 subst = TREE_OPERAND (ref, 1);
219 sprintf (buf, "p%d", i);
220 param_name = get_identifier (buf);
221 param_type = TREE_TYPE (ref);
222 param_decl
223 = build_decl (input_location, PARM_DECL, param_name, param_type);
224 DECL_ARG_TYPE (param_decl) = param_type;
225 DECL_ARTIFICIAL (param_decl) = 1;
226 TREE_READONLY (param_decl) = 1;
228 size = substitute_in_expr (size, subst, param_decl);
230 param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
231 param_decl_list = chainon (param_decl, param_decl_list);
232 args->quick_push (ref);
235 self_refs.release ();
237 /* Append 'void' to indicate that the number of parameters is fixed. */
238 param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
240 /* The 3 lists have been created in reverse order. */
241 param_type_list = nreverse (param_type_list);
242 param_decl_list = nreverse (param_decl_list);
244 /* Build the function type. */
245 return_type = TREE_TYPE (size);
246 fntype = build_function_type (return_type, param_type_list);
248 /* Build the function declaration. */
250   sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
250 fnname = get_file_function_name (buf);
251 fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
252 for (t = param_decl_list; t; t = DECL_CHAIN (t))
253 DECL_CONTEXT (t) = fndecl;
254 DECL_ARGUMENTS (fndecl) = param_decl_list;
255 DECL_RESULT (fndecl)
256 = build_decl (input_location, RESULT_DECL, 0, return_type);
257 DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
259 /* The function has been created by the compiler and we don't
260 want to emit debug info for it. */
261 DECL_ARTIFICIAL (fndecl) = 1;
262 DECL_IGNORED_P (fndecl) = 1;
264 /* It is supposed to be "const" and never throw. */
265 TREE_READONLY (fndecl) = 1;
266 TREE_NOTHROW (fndecl) = 1;
268 /* We want it to be inlined when this is deemed profitable, as
269 well as discarded if every call has been integrated. */
270 DECL_DECLARED_INLINE_P (fndecl) = 1;
272 /* It is made up of a unique return statement. */
273 DECL_INITIAL (fndecl) = make_node (BLOCK);
274 BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
275 t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
276 DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
277 TREE_STATIC (fndecl) = 1;
279 /* Put it onto the list of size functions. */
280 vec_safe_push (size_functions, fndecl);
282 /* Replace the original expression with a call to the size function. */
283 return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
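/* Hypothetical sketch of the transformation above, for illustration
   only: given a self-referential size such as
   `<PLACEHOLDER_EXPR>.n * 8' (an Ada record whose size depends on its
   own discriminant N), the loop replaces the component reference by a
   PARM_DECL `p0' and builds an artificial function roughly equivalent
   to `SZ0 (p0) { return p0 * 8; }'; the original size expression is
   then rewritten as the call `SZ0 (<PLACEHOLDER_EXPR>.n)', which the
   inliner folds back in at each use once the object is known.  */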
286 /* Take, queue and compile all the size functions. It is essential that
287 the size functions be gimplified at the very end of the compilation
288 in order to guarantee transparent handling of self-referential sizes.
289 Otherwise the GENERIC inliner would not be able to inline them back
290 at each of their call sites, thus creating artificial non-constant
291 size expressions which would trigger nasty problems later on. */
293 void
294 finalize_size_functions (void)
296 unsigned int i;
297 tree fndecl;
299 for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
301 allocate_struct_function (fndecl, false);
302 set_cfun (NULL);
303 dump_function (TDI_original, fndecl);
304 gimplify_function_tree (fndecl);
305 dump_function (TDI_generic, fndecl);
306 cgraph_node::finalize_function (fndecl, false);
309 vec_free (size_functions);
312 /* Return the machine mode to use for a nonscalar of SIZE bits. The
313 mode must be in class MCLASS, and have exactly that many value bits;
314    it may have padding as well. If LIMIT is nonzero, modes wider
315    than MAX_FIXED_MODE_SIZE will not be used. */
317 machine_mode
318 mode_for_size (unsigned int size, enum mode_class mclass, int limit)
320 machine_mode mode;
321 int i;
323 if (limit && size > MAX_FIXED_MODE_SIZE)
324 return BLKmode;
326 /* Get the first mode which has this size, in the specified class. */
327 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
328 mode = GET_MODE_WIDER_MODE (mode))
329 if (GET_MODE_PRECISION (mode) == size)
330 return mode;
332 if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
333 for (i = 0; i < NUM_INT_N_ENTS; i ++)
334 if (int_n_data[i].bitsize == size
335 && int_n_enabled_p[i])
336 return int_n_data[i].m;
338 return BLKmode;
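/* Example, target-dependent and shown for a typical 32-bit-int
   target: mode_for_size (32, MODE_INT, 0) yields SImode, whereas
   mode_for_size (24, MODE_INT, 1) yields BLKmode because no integer
   mode on such a target has exactly 24 value bits.  */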
341 /* Similar, except passed a tree node. */
343 machine_mode
344 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
346 unsigned HOST_WIDE_INT uhwi;
347 unsigned int ui;
349 if (!tree_fits_uhwi_p (size))
350 return BLKmode;
351 uhwi = tree_to_uhwi (size);
352 ui = uhwi;
353 if (uhwi != ui)
354 return BLKmode;
355 return mode_for_size (ui, mclass, limit);
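/* Example: mode_for_size_tree (bitsize_int (16), MODE_INT, 1) behaves
   like mode_for_size (16, MODE_INT, 1) and yields HImode on typical
   targets; a size that does not fit in an unsigned HOST_WIDE_INT or
   an unsigned int yields BLKmode.  */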
358 /* Similar, but never return BLKmode; return the narrowest mode that
359 contains at least the requested number of value bits. */
361 machine_mode
362 smallest_mode_for_size (unsigned int size, enum mode_class mclass)
364 machine_mode mode = VOIDmode;
365 int i;
367 /* Get the first mode which has at least this size, in the
368 specified class. */
369 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
370 mode = GET_MODE_WIDER_MODE (mode))
371 if (GET_MODE_PRECISION (mode) >= size)
372 break;
374 if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
375 for (i = 0; i < NUM_INT_N_ENTS; i ++)
376 if (int_n_data[i].bitsize >= size
377 && int_n_data[i].bitsize < GET_MODE_PRECISION (mode)
378 && int_n_enabled_p[i])
379 mode = int_n_data[i].m;
381 if (mode == VOIDmode)
382 gcc_unreachable ();
384 return mode;
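/* Example, typical target: smallest_mode_for_size (17, MODE_INT)
   returns SImode, the narrowest integer mode providing at least 17
   value bits, since HImode only provides 16; unlike mode_for_size
   this never returns BLKmode.  */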
387 /* Find an integer mode of the exact same size, or BLKmode on failure. */
389 machine_mode
390 int_mode_for_mode (machine_mode mode)
392 switch (GET_MODE_CLASS (mode))
394 case MODE_INT:
395 case MODE_PARTIAL_INT:
396 break;
398 case MODE_COMPLEX_INT:
399 case MODE_COMPLEX_FLOAT:
400 case MODE_FLOAT:
401 case MODE_DECIMAL_FLOAT:
402 case MODE_VECTOR_INT:
403 case MODE_VECTOR_FLOAT:
404 case MODE_FRACT:
405 case MODE_ACCUM:
406 case MODE_UFRACT:
407 case MODE_UACCUM:
408 case MODE_VECTOR_FRACT:
409 case MODE_VECTOR_ACCUM:
410 case MODE_VECTOR_UFRACT:
411 case MODE_VECTOR_UACCUM:
412 mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
413 break;
415 case MODE_RANDOM:
416 if (mode == BLKmode)
417 break;
419 /* ... fall through ... */
421 case MODE_CC:
422 default:
423 gcc_unreachable ();
426 return mode;
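/* Example, typical target where SFmode and SImode are both 32 bits
   wide: int_mode_for_mode (SFmode) returns SImode, allowing the bits
   of a float to be moved through an integer mode; BLKmode is simply
   returned unchanged.  */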
429 /* Find a mode that can be used for efficient bitwise operations on MODE.
430 Return BLKmode if no such mode exists. */
432 machine_mode
433 bitwise_mode_for_mode (machine_mode mode)
435 /* Quick exit if we already have a suitable mode. */
436 unsigned int bitsize = GET_MODE_BITSIZE (mode);
437 if (SCALAR_INT_MODE_P (mode) && bitsize <= MAX_FIXED_MODE_SIZE)
438 return mode;
440 /* Reuse the sanity checks from int_mode_for_mode. */
441 gcc_checking_assert ((int_mode_for_mode (mode), true));
443 /* Try to replace complex modes with complex modes. In general we
444 expect both components to be processed independently, so we only
445 care whether there is a register for the inner mode. */
446 if (COMPLEX_MODE_P (mode))
448 machine_mode trial = mode;
449 if (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT)
450 trial = mode_for_size (bitsize, MODE_COMPLEX_INT, false);
451 if (trial != BLKmode
452 && have_regs_of_mode[GET_MODE_INNER (trial)])
453 return trial;
456 /* Try to replace vector modes with vector modes. Also try using vector
457 modes if an integer mode would be too big. */
458 if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE)
460 machine_mode trial = mode;
461 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
462 trial = mode_for_size (bitsize, MODE_VECTOR_INT, 0);
463 if (trial != BLKmode
464 && have_regs_of_mode[trial]
465 && targetm.vector_mode_supported_p (trial))
466 return trial;
469 /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE. */
470 return mode_for_size (bitsize, MODE_INT, true);
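/* Example, target-dependent: bitwise_mode_for_mode (SFmode) usually
   returns SImode; for a 128-bit vector mode such as V4SImode the mode
   itself is returned when suitable vector registers exist, otherwise
   the function falls back to an integer mode no wider than
   MAX_FIXED_MODE_SIZE, or to BLKmode.  */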
473 /* Find a type that can be used for efficient bitwise operations on MODE.
474 Return null if no such mode exists. */
476 tree
477 bitwise_type_for_mode (machine_mode mode)
479 mode = bitwise_mode_for_mode (mode);
480 if (mode == BLKmode)
481 return NULL_TREE;
483 unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
484 tree inner_type = build_nonstandard_integer_type (inner_size, true);
486 if (VECTOR_MODE_P (mode))
487 return build_vector_type_for_mode (inner_type, mode);
489 if (COMPLEX_MODE_P (mode))
490 return build_complex_type (inner_type);
492 gcc_checking_assert (GET_MODE_INNER (mode) == VOIDmode);
493 return inner_type;
496 /* Find a mode that is suitable for representing a vector with
497 NUNITS elements of mode INNERMODE. Returns BLKmode if there
498 is no suitable mode. */
500 machine_mode
501 mode_for_vector (machine_mode innermode, unsigned nunits)
503 machine_mode mode;
505 /* First, look for a supported vector type. */
506 if (SCALAR_FLOAT_MODE_P (innermode))
507 mode = MIN_MODE_VECTOR_FLOAT;
508 else if (SCALAR_FRACT_MODE_P (innermode))
509 mode = MIN_MODE_VECTOR_FRACT;
510 else if (SCALAR_UFRACT_MODE_P (innermode))
511 mode = MIN_MODE_VECTOR_UFRACT;
512 else if (SCALAR_ACCUM_MODE_P (innermode))
513 mode = MIN_MODE_VECTOR_ACCUM;
514 else if (SCALAR_UACCUM_MODE_P (innermode))
515 mode = MIN_MODE_VECTOR_UACCUM;
516 else
517 mode = MIN_MODE_VECTOR_INT;
519 /* Do not check vector_mode_supported_p here. We'll do that
520 later in vector_type_mode. */
521 for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
522 if (GET_MODE_NUNITS (mode) == nunits
523 && GET_MODE_INNER (mode) == innermode)
524 break;
526 /* For integers, try mapping it to a same-sized scalar mode. */
527 if (mode == VOIDmode
528 && GET_MODE_CLASS (innermode) == MODE_INT)
529 mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
530 MODE_INT, 0);
532 if (mode == VOIDmode
533 || (GET_MODE_CLASS (mode) == MODE_INT
534 && !have_regs_of_mode[mode]))
535 return BLKmode;
537 return mode;
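/* Example, target-dependent: mode_for_vector (SImode, 4) returns
   V4SImode on targets that provide such a mode; if no 4 x SImode
   vector mode exists, the integer fallback may yield TImode, and
   BLKmode is returned when neither alternative is usable.  */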
540 /* Return the alignment of MODE. This will be bounded by 1 and
541 BIGGEST_ALIGNMENT. */
543 unsigned int
544 get_mode_alignment (machine_mode mode)
546 return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
549 /* Return the precision of the mode, or for a complex or vector mode the
550 precision of the mode of its elements. */
552 unsigned int
553 element_precision (machine_mode mode)
555 if (COMPLEX_MODE_P (mode) || VECTOR_MODE_P (mode))
556 mode = GET_MODE_INNER (mode);
558 return GET_MODE_PRECISION (mode);
561 /* Return the natural mode of an array, given that it is SIZE bytes in
562 total and has elements of type ELEM_TYPE. */
564 static machine_mode
565 mode_for_array (tree elem_type, tree size)
567 tree elem_size;
568 unsigned HOST_WIDE_INT int_size, int_elem_size;
569 bool limit_p;
571 /* One-element arrays get the component type's mode. */
572 elem_size = TYPE_SIZE (elem_type);
573 if (simple_cst_equal (size, elem_size))
574 return TYPE_MODE (elem_type);
576 limit_p = true;
577 if (tree_fits_uhwi_p (size) && tree_fits_uhwi_p (elem_size))
579 int_size = tree_to_uhwi (size);
580 int_elem_size = tree_to_uhwi (elem_size);
581 if (int_elem_size > 0
582 && int_size % int_elem_size == 0
583 && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
584 int_size / int_elem_size))
585 limit_p = false;
587 return mode_for_size_tree (size, MODE_INT, limit_p);
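/* Illustrative example: for `int a[1]' the array simply gets the
   element's mode (SImode on a 32-bit-int target).  For larger arrays,
   say `int a[4]' (128 bits), an integer mode such as TImode is chosen
   only if it is available within the applicable limit; lifting the
   MAX_FIXED_MODE_SIZE limit requires the target's
   array_mode_supported_p hook to accept (SImode, 4), and BLKmode is
   used otherwise.  */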
590 /* Subroutine of layout_decl: Force alignment required for the data type.
591 But if the decl itself wants greater alignment, don't override that. */
593 static inline void
594 do_type_align (tree type, tree decl)
596 if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
598 DECL_ALIGN (decl) = TYPE_ALIGN (type);
599 if (TREE_CODE (decl) == FIELD_DECL)
600 DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
604 /* Set the size, mode and alignment of a ..._DECL node.
605 TYPE_DECL does need this for C++.
606 Note that LABEL_DECL and CONST_DECL nodes do not need this,
607 and FUNCTION_DECL nodes have them set up in a special (and simple) way.
608 Don't call layout_decl for them.
610 KNOWN_ALIGN is the amount of alignment we can assume this
611 decl has with no special effort. It is relevant only for FIELD_DECLs
612 and depends on the previous fields.
613 All that matters about KNOWN_ALIGN is which powers of 2 divide it.
614 If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
615 the record will be aligned to suit. */
617 void
618 layout_decl (tree decl, unsigned int known_align)
620 tree type = TREE_TYPE (decl);
621 enum tree_code code = TREE_CODE (decl);
622 rtx rtl = NULL_RTX;
623 location_t loc = DECL_SOURCE_LOCATION (decl);
625 if (code == CONST_DECL)
626 return;
628 gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
629         || code == TYPE_DECL || code == FIELD_DECL);
631 rtl = DECL_RTL_IF_SET (decl);
633 if (type == error_mark_node)
634 type = void_type_node;
636 /* Usually the size and mode come from the data type without change,
637 however, the front-end may set the explicit width of the field, so its
638 size may not be the same as the size of its type. This happens with
639 bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
640 also happens with other fields. For example, the C++ front-end creates
641 zero-sized fields corresponding to empty base classes, and depends on
642 layout_type setting DECL_FIELD_BITPOS correctly for the field. Set the
643 size in bytes from the size in bits. If we have already set the mode,
644 don't set it again since we can be called twice for FIELD_DECLs. */
646 DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
647 if (DECL_MODE (decl) == VOIDmode)
648 DECL_MODE (decl) = TYPE_MODE (type);
650 if (lang_hooks.decls.layout_decl_p (decl, type))
652 lang_hooks.decls.layout_decl (decl, type);
654 else if (DECL_SIZE (decl) == 0)
656 DECL_SIZE (decl) = TYPE_SIZE (type);
657 DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
659 else if (DECL_SIZE_UNIT (decl) == 0)
660 DECL_SIZE_UNIT (decl)
661 = fold_convert_loc (loc, sizetype,
662 size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
663 bitsize_unit_node));
665 if (code != FIELD_DECL)
666 /* For non-fields, update the alignment from the type. */
667 do_type_align (type, decl);
668 else
669 /* For fields, it's a bit more complicated... */
671 bool old_user_align = DECL_USER_ALIGN (decl);
672 bool zero_bitfield = false;
673 bool packed_p = DECL_PACKED (decl);
674 unsigned int mfa;
676 if (DECL_BIT_FIELD (decl))
678 DECL_BIT_FIELD_TYPE (decl) = type;
680 /* A zero-length bit-field affects the alignment of the next
681 field. In essence such bit-fields are not influenced by
682 any packing due to #pragma pack or attribute packed. */
683 if (integer_zerop (DECL_SIZE (decl))
684 && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
686 zero_bitfield = true;
687 packed_p = false;
688 #ifdef PCC_BITFIELD_TYPE_MATTERS
689 if (PCC_BITFIELD_TYPE_MATTERS)
690 do_type_align (type, decl);
691 else
692 #endif
694 #ifdef EMPTY_FIELD_BOUNDARY
695 if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
697 DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
698 DECL_USER_ALIGN (decl) = 0;
700 #endif
704 /* See if we can use an ordinary integer mode for a bit-field.
705 Conditions are: a fixed size that is correct for another mode,
706 occupying a complete byte or bytes on proper boundary. */
707 if (TYPE_SIZE (type) != 0
708 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
709 && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
711 machine_mode xmode
712 = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
713 unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
715 if (xmode != BLKmode
716 && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
717 && (known_align == 0 || known_align >= xalign))
719 DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
720 DECL_MODE (decl) = xmode;
721 DECL_BIT_FIELD (decl) = 0;
725 /* Turn off DECL_BIT_FIELD if we won't need it set. */
726 if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
727 && known_align >= TYPE_ALIGN (type)
728 && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
729 DECL_BIT_FIELD (decl) = 0;
731 else if (packed_p && DECL_USER_ALIGN (decl))
732 /* Don't touch DECL_ALIGN. For other packed fields, go ahead and
733 round up; we'll reduce it again below. We want packing to
734 supersede USER_ALIGN inherited from the type, but defer to
735 alignment explicitly specified on the field decl. */;
736 else
737 do_type_align (type, decl);
739 /* If the field is packed and not explicitly aligned, give it the
740 minimum alignment. Note that do_type_align may set
741 DECL_USER_ALIGN, so we need to check old_user_align instead. */
742 if (packed_p
743 && !old_user_align)
744 DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
746 if (! packed_p && ! DECL_USER_ALIGN (decl))
748    /* Some targets (e.g. i386, VMS) limit struct field alignment
749 to a lower boundary than alignment of variables unless
750 it was overridden by attribute aligned. */
751 #ifdef BIGGEST_FIELD_ALIGNMENT
752 DECL_ALIGN (decl)
753 = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
754 #endif
755 #ifdef ADJUST_FIELD_ALIGN
756 DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
757 #endif
760 if (zero_bitfield)
761 mfa = initial_max_fld_align * BITS_PER_UNIT;
762 else
763 mfa = maximum_field_alignment;
764 /* Should this be controlled by DECL_USER_ALIGN, too? */
765 if (mfa != 0)
766 DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
769 /* Evaluate nonconstant size only once, either now or as soon as safe. */
770 if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
771 DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
772 if (DECL_SIZE_UNIT (decl) != 0
773 && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
774 DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));
776 /* If requested, warn about definitions of large data objects. */
777 if (warn_larger_than
778 && (code == VAR_DECL || code == PARM_DECL)
779 && ! DECL_EXTERNAL (decl))
781 tree size = DECL_SIZE_UNIT (decl);
783 if (size != 0 && TREE_CODE (size) == INTEGER_CST
784 && compare_tree_int (size, larger_than_size) > 0)
786 int size_as_int = TREE_INT_CST_LOW (size);
788 if (compare_tree_int (size, size_as_int) == 0)
789 warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
790 else
791 warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
792 decl, larger_than_size);
796 /* If the RTL was already set, update its mode and mem attributes. */
797 if (rtl)
799 PUT_MODE (rtl, DECL_MODE (decl));
800 SET_DECL_RTL (decl, 0);
801 set_mem_attributes (rtl, decl, 1);
802 SET_DECL_RTL (decl, rtl);
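/* Illustrative example: for a plain `int i;' VAR_DECL on a 32-bit-int
   target, layout_decl copies the 32-bit size, the 4-byte unit size,
   SImode and the type's alignment from `int'.  For a bit-field
   FIELD_DECL the front end has already set DECL_SIZE, so only the
   mode and alignment handling above applies.  */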
806 /* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
807 a previous call to layout_decl and calls it again. */
809 void
810 relayout_decl (tree decl)
812 DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
813 DECL_MODE (decl) = VOIDmode;
814 if (!DECL_USER_ALIGN (decl))
815 DECL_ALIGN (decl) = 0;
816 SET_DECL_RTL (decl, 0);
818 layout_decl (decl, 0);
821 /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
822 QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which
823 is to be passed to all other layout functions for this record. It is the
824 responsibility of the caller to call `free' for the storage returned.
825 Note that garbage collection is not permitted until we finish laying
826 out the record. */
828 record_layout_info
829 start_record_layout (tree t)
831 record_layout_info rli = XNEW (struct record_layout_info_s);
833 rli->t = t;
835 /* If the type has a minimum specified alignment (via an attribute
836 declaration, for example) use it -- otherwise, start with a
837 one-byte alignment. */
838 rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
839 rli->unpacked_align = rli->record_align;
840 rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);
842 #ifdef STRUCTURE_SIZE_BOUNDARY
843 /* Packed structures don't need to have minimum size. */
844 if (! TYPE_PACKED (t))
846 unsigned tmp;
848 /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY. */
849 tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
850 if (maximum_field_alignment != 0)
851 tmp = MIN (tmp, maximum_field_alignment);
852 rli->record_align = MAX (rli->record_align, tmp);
854 #endif
856 rli->offset = size_zero_node;
857 rli->bitpos = bitsize_zero_node;
858 rli->prev_field = 0;
859 rli->pending_statics = 0;
860 rli->packed_maybe_necessary = 0;
861 rli->remaining_in_alignment = 0;
863 return rli;
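/* Illustrative example: for `struct { char c; int i; }',
   start_record_layout returns an rli with offset 0, bitpos 0 and
   record_align of at least BITS_PER_UNIT (possibly raised by
   STRUCTURE_SIZE_BOUNDARY or a pre-set TYPE_ALIGN); place_field is
   then called for each field and the final size and alignment are
   computed when the record is finished.  */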
866 /* Return the combined bit position for the byte offset OFFSET and the
867 bit position BITPOS.
869 These functions operate on byte and bit positions present in FIELD_DECLs
870 and assume that these expressions result in no (intermediate) overflow.
871 This assumption is necessary to fold the expressions as much as possible,
872 so as to avoid creating artificially variable-sized types in languages
873 supporting variable-sized types like Ada. */
875 tree
876 bit_from_pos (tree offset, tree bitpos)
878 if (TREE_CODE (offset) == PLUS_EXPR)
879 offset = size_binop (PLUS_EXPR,
880 fold_convert (bitsizetype, TREE_OPERAND (offset, 0)),
881 fold_convert (bitsizetype, TREE_OPERAND (offset, 1)));
882 else
883 offset = fold_convert (bitsizetype, offset);
884 return size_binop (PLUS_EXPR, bitpos,
885 size_binop (MULT_EXPR, offset, bitsize_unit_node));
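/* Example, assuming 8-bit bytes: bit_from_pos (size_int (3),
   bitsize_int (5)) folds to the constant 29, i.e. 3 * 8 + 5 bits.  */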
888 /* Return the combined truncated byte position for the byte offset OFFSET and
889 the bit position BITPOS. */
891 tree
892 byte_from_pos (tree offset, tree bitpos)
894 tree bytepos;
895 if (TREE_CODE (bitpos) == MULT_EXPR
896 && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
897 bytepos = TREE_OPERAND (bitpos, 0);
898 else
899 bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
900 return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
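/* Example, assuming 8-bit bytes: byte_from_pos (size_int (2),
   bitsize_int (13)) folds to 3, the truncated byte position
   2 + 13 / 8.  */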
903 /* Split the bit position POS into a byte offset *POFFSET and a bit
904 position *PBITPOS with the byte offset aligned to OFF_ALIGN bits. */
906 void
907 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
908 tree pos)
910 tree toff_align = bitsize_int (off_align);
911 if (TREE_CODE (pos) == MULT_EXPR
912 && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
914 *poffset = size_binop (MULT_EXPR,
915 fold_convert (sizetype, TREE_OPERAND (pos, 0)),
916 size_int (off_align / BITS_PER_UNIT));
917 *pbitpos = bitsize_zero_node;
919 else
921 *poffset = size_binop (MULT_EXPR,
922 fold_convert (sizetype,
923 size_binop (FLOOR_DIV_EXPR, pos,
924 toff_align)),
925 size_int (off_align / BITS_PER_UNIT));
926 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
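/* Example, assuming 8-bit bytes: for POS = 70 bits and OFF_ALIGN = 32,
   pos_from_bit sets *POFFSET to 8 bytes (two complete 32-bit units)
   and *PBITPOS to the remaining 6 bits.  */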
930 /* Given a pointer to bit and byte offsets and an offset alignment,
931 normalize the offsets so they are within the alignment. */
933 void
934 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
936 /* If the bit position is now larger than it should be, adjust it
937 downwards. */
938 if (compare_tree_int (*pbitpos, off_align) >= 0)
940 tree offset, bitpos;
941 pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
942 *poffset = size_binop (PLUS_EXPR, *poffset, offset);
943 *pbitpos = bitpos;
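/* Example, assuming 8-bit bytes: with OFF_ALIGN = 32, a bit position
   of 40 is normalized to 4 additional bytes of offset and a bit
   position of 8, which keeps the bit position below the offset
   alignment.  */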
947 /* Print debugging information about the information in RLI. */
949 DEBUG_FUNCTION void
950 debug_rli (record_layout_info rli)
952 print_node_brief (stderr, "type", rli->t, 0);
953 print_node_brief (stderr, "\noffset", rli->offset, 0);
954 print_node_brief (stderr, " bitpos", rli->bitpos, 0);
956 fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
957 rli->record_align, rli->unpacked_align,
958 rli->offset_align);
960   /* The ms_struct code is the only code that uses this. */
961 if (targetm.ms_bitfield_layout_p (rli->t))
962 fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
964 if (rli->packed_maybe_necessary)
965 fprintf (stderr, "packed may be necessary\n");
967 if (!vec_safe_is_empty (rli->pending_statics))
969 fprintf (stderr, "pending statics:\n");
970 debug_vec_tree (rli->pending_statics);
974 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
975 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */
977 void
978 normalize_rli (record_layout_info rli)
980 normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
983 /* Returns the size in bytes allocated so far. */
985 tree
986 rli_size_unit_so_far (record_layout_info rli)
988 return byte_from_pos (rli->offset, rli->bitpos);
991 /* Returns the size in bits allocated so far. */
993 tree
994 rli_size_so_far (record_layout_info rli)
996 return bit_from_pos (rli->offset, rli->bitpos);
999 /* FIELD is about to be added to RLI->T. The alignment (in bits) of
1000 the next available location within the record is given by KNOWN_ALIGN.
1001 Update the variable alignment fields in RLI, and return the alignment
1002 to give the FIELD. */
1004 unsigned int
1005 update_alignment_for_field (record_layout_info rli, tree field,
1006 unsigned int known_align)
1008 /* The alignment required for FIELD. */
1009 unsigned int desired_align;
1010 /* The type of this field. */
1011 tree type = TREE_TYPE (field);
1012 /* True if the field was explicitly aligned by the user. */
1013 bool user_align;
1014 bool is_bitfield;
1016 /* Do not attempt to align an ERROR_MARK node */
1017 if (TREE_CODE (type) == ERROR_MARK)
1018 return 0;
1020 /* Lay out the field so we know what alignment it needs. */
1021 layout_decl (field, known_align);
1022 desired_align = DECL_ALIGN (field);
1023 user_align = DECL_USER_ALIGN (field);
1025 is_bitfield = (type != error_mark_node
1026 && DECL_BIT_FIELD_TYPE (field)
1027 && ! integer_zerop (TYPE_SIZE (type)));
1029 /* Record must have at least as much alignment as any field.
1030 Otherwise, the alignment of the field within the record is
1031 meaningless. */
1032 if (targetm.ms_bitfield_layout_p (rli->t))
1034 /* Here, the alignment of the underlying type of a bitfield can
1035 affect the alignment of a record; even a zero-sized field
1036 can do this. The alignment should be to the alignment of
1037 the type, except that for zero-size bitfields this only
1038 applies if there was an immediately prior, nonzero-size
1039 bitfield. (That's the way it is, experimentally.) */
1040 if ((!is_bitfield && !DECL_PACKED (field))
1041 || ((DECL_SIZE (field) == NULL_TREE
1042 || !integer_zerop (DECL_SIZE (field)))
1043 ? !DECL_PACKED (field)
1044 : (rli->prev_field
1045 && DECL_BIT_FIELD_TYPE (rli->prev_field)
1046 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
1048 unsigned int type_align = TYPE_ALIGN (type);
1049 type_align = MAX (type_align, desired_align);
1050 if (maximum_field_alignment != 0)
1051 type_align = MIN (type_align, maximum_field_alignment);
1052 rli->record_align = MAX (rli->record_align, type_align);
1053 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1056 #ifdef PCC_BITFIELD_TYPE_MATTERS
1057 else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
1059 /* Named bit-fields cause the entire structure to have the
1060 alignment implied by their type. Some targets also apply the same
1061 rules to unnamed bitfields. */
1062 if (DECL_NAME (field) != 0
1063 || targetm.align_anon_bitfield ())
1065 unsigned int type_align = TYPE_ALIGN (type);
1067 #ifdef ADJUST_FIELD_ALIGN
1068 if (! TYPE_USER_ALIGN (type))
1069 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1070 #endif
1072       /* Targets might choose to handle unnamed and hence possibly
1073          zero-width bitfields. Those are not influenced by #pragmas
1074 or packed attributes. */
1075 if (integer_zerop (DECL_SIZE (field)))
1077 if (initial_max_fld_align)
1078 type_align = MIN (type_align,
1079 initial_max_fld_align * BITS_PER_UNIT);
1081 else if (maximum_field_alignment != 0)
1082 type_align = MIN (type_align, maximum_field_alignment);
1083 else if (DECL_PACKED (field))
1084 type_align = MIN (type_align, BITS_PER_UNIT);
1086 /* The alignment of the record is increased to the maximum
1087 of the current alignment, the alignment indicated on the
1088 field (i.e., the alignment specified by an __aligned__
1089 attribute), and the alignment indicated by the type of
1090 the field. */
1091 rli->record_align = MAX (rli->record_align, desired_align);
1092 rli->record_align = MAX (rli->record_align, type_align);
1094 if (warn_packed)
1095 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1096 user_align |= TYPE_USER_ALIGN (type);
1099 #endif
1100 else
1102 rli->record_align = MAX (rli->record_align, desired_align);
1103 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1106 TYPE_USER_ALIGN (rli->t) |= user_align;
1108 return desired_align;
1111 /* Called from place_field to handle unions. */
1113 static void
1114 place_union_field (record_layout_info rli, tree field)
1116 update_alignment_for_field (rli, field, /*known_align=*/0);
1118 DECL_FIELD_OFFSET (field) = size_zero_node;
1119 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
1120 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
1122 /* If this is an ERROR_MARK return *after* having set the
1123 field at the start of the union. This helps when parsing
1124 invalid fields. */
1125 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
1126 return;
1128 /* We assume the union's size will be a multiple of a byte so we don't
1129 bother with BITPOS. */
1130 if (TREE_CODE (rli->t) == UNION_TYPE)
1131 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1132 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
1133 rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
1134 DECL_SIZE_UNIT (field), rli->offset);
1137 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
1138 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
1139 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
1140 units of alignment than the underlying TYPE. */
1141 static int
1142 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
1143 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
1145 /* Note that the calculation of OFFSET might overflow; we calculate it so
1146 that we still get the right result as long as ALIGN is a power of two. */
1147 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
1149 offset = offset % align;
1150 return ((offset + size + align - 1) / align
1151 > tree_to_uhwi (TYPE_SIZE (type)) / align);
1153 #endif
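/* Example for excess_unit_span above, assuming 8-bit bytes: a 4-bit
   field placed at byte 3, bit 7 of a type whose size and alignment
   are both 8 bits starts 7 bits into an 8-bit unit, so the field
   would straddle two units while the type itself fits in one; the
   function returns nonzero and the caller advances to the next
   boundary.  */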
1155 /* RLI contains information about the layout of a RECORD_TYPE. FIELD
1156 is a FIELD_DECL to be added after those fields already present in
1157 T. (FIELD is not actually added to the TYPE_FIELDS list here;
1158 callers that desire that behavior must manually perform that step.) */
1160 void
1161 place_field (record_layout_info rli, tree field)
1163 /* The alignment required for FIELD. */
1164 unsigned int desired_align;
1165 /* The alignment FIELD would have if we just dropped it into the
1166 record as it presently stands. */
1167 unsigned int known_align;
1168 unsigned int actual_align;
1169 /* The type of this field. */
1170 tree type = TREE_TYPE (field);
1172 gcc_assert (TREE_CODE (field) != ERROR_MARK);
1174 /* If FIELD is static, then treat it like a separate variable, not
1175 really like a structure field. If it is a FUNCTION_DECL, it's a
1176 method. In both cases, all we do is lay out the decl, and we do
1177 it *after* the record is laid out. */
1178 if (TREE_CODE (field) == VAR_DECL)
1180 vec_safe_push (rli->pending_statics, field);
1181 return;
1184 /* Enumerators and enum types which are local to this class need not
1185 be laid out. Likewise for initialized constant fields. */
1186 else if (TREE_CODE (field) != FIELD_DECL)
1187 return;
1189 /* Unions are laid out very differently than records, so split
1190 that code off to another function. */
1191 else if (TREE_CODE (rli->t) != RECORD_TYPE)
1193 place_union_field (rli, field);
1194 return;
1197 else if (TREE_CODE (type) == ERROR_MARK)
1199 /* Place this field at the current allocation position, so we
1200 maintain monotonicity. */
1201 DECL_FIELD_OFFSET (field) = rli->offset;
1202 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1203 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1204 return;
1207 /* Work out the known alignment so far. Note that A & (-A) is the
1208 value of the least-significant bit in A that is one. */
1209 if (! integer_zerop (rli->bitpos))
1210 known_align = (tree_to_uhwi (rli->bitpos)
1211 & - tree_to_uhwi (rli->bitpos));
1212 else if (integer_zerop (rli->offset))
1213 known_align = 0;
1214 else if (tree_fits_uhwi_p (rli->offset))
1215 known_align = (BITS_PER_UNIT
1216 * (tree_to_uhwi (rli->offset)
1217 & - tree_to_uhwi (rli->offset)));
1218 else
1219 known_align = rli->offset_align;
1221 desired_align = update_alignment_for_field (rli, field, known_align);
1222 if (known_align == 0)
1223 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1225 if (warn_packed && DECL_PACKED (field))
1227 if (known_align >= TYPE_ALIGN (type))
1229 if (TYPE_ALIGN (type) > desired_align)
1231 if (STRICT_ALIGNMENT)
1232 warning (OPT_Wattributes, "packed attribute causes "
1233 "inefficient alignment for %q+D", field);
1234 /* Don't warn if DECL_PACKED was set by the type. */
1235 else if (!TYPE_PACKED (rli->t))
1236 warning (OPT_Wattributes, "packed attribute is "
1237 "unnecessary for %q+D", field);
1240 else
1241 rli->packed_maybe_necessary = 1;
1244 /* Does this field automatically have alignment it needs by virtue
1245 of the fields that precede it and the record's own alignment? */
1246 if (known_align < desired_align)
1248 /* No, we need to skip space before this field.
1249 Bump the cumulative size to multiple of field alignment. */
1251 if (!targetm.ms_bitfield_layout_p (rli->t)
1252 && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
1253 warning (OPT_Wpadded, "padding struct to align %q+D", field);
1255 /* If the alignment is still within offset_align, just align
1256 the bit position. */
1257 if (desired_align < rli->offset_align)
1258 rli->bitpos = round_up (rli->bitpos, desired_align);
1259 else
1261 /* First adjust OFFSET by the partial bits, then align. */
1262 rli->offset
1263 = size_binop (PLUS_EXPR, rli->offset,
1264 fold_convert (sizetype,
1265 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1266 bitsize_unit_node)));
1267 rli->bitpos = bitsize_zero_node;
1269 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1272 if (! TREE_CONSTANT (rli->offset))
1273 rli->offset_align = desired_align;
1274 if (targetm.ms_bitfield_layout_p (rli->t))
1275 rli->prev_field = NULL;
1278 /* Handle compatibility with PCC. Note that if the record has any
1279 variable-sized fields, we need not worry about compatibility. */
1280 #ifdef PCC_BITFIELD_TYPE_MATTERS
1281 if (PCC_BITFIELD_TYPE_MATTERS
1282 && ! targetm.ms_bitfield_layout_p (rli->t)
1283 && TREE_CODE (field) == FIELD_DECL
1284 && type != error_mark_node
1285 && DECL_BIT_FIELD (field)
1286 && (! DECL_PACKED (field)
1287 /* Enter for these packed fields only to issue a warning. */
1288 || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1289 && maximum_field_alignment == 0
1290 && ! integer_zerop (DECL_SIZE (field))
1291 && tree_fits_uhwi_p (DECL_SIZE (field))
1292 && tree_fits_uhwi_p (rli->offset)
1293 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1295 unsigned int type_align = TYPE_ALIGN (type);
1296 tree dsize = DECL_SIZE (field);
1297 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1298 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1299 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1301 #ifdef ADJUST_FIELD_ALIGN
1302 if (! TYPE_USER_ALIGN (type))
1303 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1304 #endif
1306 /* A bit field may not span more units of alignment of its type
1307 than its type itself. Advance to next boundary if necessary. */
1308 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1310 if (DECL_PACKED (field))
1312 if (warn_packed_bitfield_compat == 1)
1313 inform
1314 (input_location,
1315 "offset of packed bit-field %qD has changed in GCC 4.4",
1316 field);
1318 else
1319 rli->bitpos = round_up (rli->bitpos, type_align);
1322 if (! DECL_PACKED (field))
1323 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1325 #endif
1327 #ifdef BITFIELD_NBYTES_LIMITED
1328 if (BITFIELD_NBYTES_LIMITED
1329 && ! targetm.ms_bitfield_layout_p (rli->t)
1330 && TREE_CODE (field) == FIELD_DECL
1331 && type != error_mark_node
1332 && DECL_BIT_FIELD_TYPE (field)
1333 && ! DECL_PACKED (field)
1334 && ! integer_zerop (DECL_SIZE (field))
1335 && tree_fits_uhwi_p (DECL_SIZE (field))
1336 && tree_fits_uhwi_p (rli->offset)
1337 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1339 unsigned int type_align = TYPE_ALIGN (type);
1340 tree dsize = DECL_SIZE (field);
1341 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1342 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1343 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1345 #ifdef ADJUST_FIELD_ALIGN
1346 if (! TYPE_USER_ALIGN (type))
1347 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1348 #endif
1350 if (maximum_field_alignment != 0)
1351 type_align = MIN (type_align, maximum_field_alignment);
1352 /* ??? This test is opposite the test in the containing if
1353 statement, so this code is unreachable currently. */
1354 else if (DECL_PACKED (field))
1355 type_align = MIN (type_align, BITS_PER_UNIT);
1357 /* A bit field may not span the unit of alignment of its type.
1358 Advance to next boundary if necessary. */
1359 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1360 rli->bitpos = round_up (rli->bitpos, type_align);
1362 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1364 #endif
1366 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1367 A subtlety:
1368 When a bit field is inserted into a packed record, the whole
1369 size of the underlying type is used by one or more same-size
1370    adjacent bitfields. (That is, if it's long:3, 32 bits is
1371 used in the record, and any additional adjacent long bitfields are
1372 packed into the same chunk of 32 bits. However, if the size
1373 changes, a new field of that size is allocated.) In an unpacked
1374 record, this is the same as using alignment, but not equivalent
1375 when packing.
1377 Note: for compatibility, we use the type size, not the type alignment
1378 to determine alignment, since that matches the documentation */
1380 if (targetm.ms_bitfield_layout_p (rli->t))
1382 tree prev_saved = rli->prev_field;
1383 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1385 /* This is a bitfield if it exists. */
1386 if (rli->prev_field)
1388 /* If both are bitfields, nonzero, and the same size, this is
1389 the middle of a run. Zero declared size fields are special
1390 and handled as "end of run". (Note: it's nonzero declared
1391 size, but equal type sizes!) (Since we know that both
1392 the current and previous fields are bitfields by the
1393 time we check it, DECL_SIZE must be present for both.) */
1394 if (DECL_BIT_FIELD_TYPE (field)
1395 && !integer_zerop (DECL_SIZE (field))
1396 && !integer_zerop (DECL_SIZE (rli->prev_field))
1397 && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
1398 && tree_fits_uhwi_p (TYPE_SIZE (type))
1399 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1401 /* We're in the middle of a run of equal type size fields; make
1402 sure we realign if we run out of bits. (Not decl size,
1403 type size!) */
1404 HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));
1406 if (rli->remaining_in_alignment < bitsize)
1408 HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));
1410 /* out of bits; bump up to next 'word'. */
1411 rli->bitpos
1412 = size_binop (PLUS_EXPR, rli->bitpos,
1413 bitsize_int (rli->remaining_in_alignment));
1414 rli->prev_field = field;
1415 if (typesize < bitsize)
1416 rli->remaining_in_alignment = 0;
1417 else
1418 rli->remaining_in_alignment = typesize - bitsize;
1420 else
1421 rli->remaining_in_alignment -= bitsize;
1423 else
1425 /* End of a run: if leaving a run of bitfields of the same type
1426 size, we have to "use up" the rest of the bits of the type
1427 size.
1429 Compute the new position as the sum of the size for the prior
1430 type and where we first started working on that type.
1431 Note: since the beginning of the field was aligned then
1432 of course the end will be too. No round needed. */
1434 if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1436 rli->bitpos
1437 = size_binop (PLUS_EXPR, rli->bitpos,
1438 bitsize_int (rli->remaining_in_alignment));
1440 else
1441 /* We "use up" size zero fields; the code below should behave
1442 as if the prior field was not a bitfield. */
1443 prev_saved = NULL;
1445 /* Cause a new bitfield to be captured, either this time (if
1446 currently a bitfield) or next time we see one. */
1447 if (!DECL_BIT_FIELD_TYPE (field)
1448 || integer_zerop (DECL_SIZE (field)))
1449 rli->prev_field = NULL;
1452 normalize_rli (rli);
1455 /* If we're starting a new run of same type size bitfields
1456 (or a run of non-bitfields), set up the "first of the run"
1457 fields.
1459          That is, if the current field is not a bitfield, or if there
1460          was a prior bitfield and the type sizes differ, or if there wasn't
1461          a prior bitfield and the size of the current field is nonzero.
1463 Note: we must be sure to test ONLY the type size if there was
1464 a prior bitfield and ONLY for the current field being zero if
1465 there wasn't. */
1467 if (!DECL_BIT_FIELD_TYPE (field)
1468 || (prev_saved != NULL
1469 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1470 : !integer_zerop (DECL_SIZE (field)) ))
1472 /* Never smaller than a byte for compatibility. */
1473 unsigned int type_align = BITS_PER_UNIT;
1475 /* (When not a bitfield), we could be seeing a flex array (with
1476 no DECL_SIZE). Since we won't be using remaining_in_alignment
1477 until we see a bitfield (and come by here again) we just skip
1478 calculating it. */
1479 if (DECL_SIZE (field) != NULL
1480 && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
1481 && tree_fits_uhwi_p (DECL_SIZE (field)))
1483 unsigned HOST_WIDE_INT bitsize
1484 = tree_to_uhwi (DECL_SIZE (field));
1485 unsigned HOST_WIDE_INT typesize
1486 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));
1488 if (typesize < bitsize)
1489 rli->remaining_in_alignment = 0;
1490 else
1491 rli->remaining_in_alignment = typesize - bitsize;
1494 /* Now align (conventionally) for the new type. */
1495 type_align = TYPE_ALIGN (TREE_TYPE (field));
1497 if (maximum_field_alignment != 0)
1498 type_align = MIN (type_align, maximum_field_alignment);
1500 rli->bitpos = round_up (rli->bitpos, type_align);
1502 /* If we really aligned, don't allow subsequent bitfields
1503 to undo that. */
1504 rli->prev_field = NULL;
1508 /* Offset so far becomes the position of this field after normalizing. */
1509 normalize_rli (rli);
1510 DECL_FIELD_OFFSET (field) = rli->offset;
1511 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1512 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1514 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */
1515 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
1516 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));
1518 /* If this field ended up more aligned than we thought it would be (we
1519 approximate this by seeing if its position changed), lay out the field
1520 again; perhaps we can use an integral mode for it now. */
1521 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1522 actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
1523 & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1524 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1525 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1526 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
1527 actual_align = (BITS_PER_UNIT
1528 * (tree_to_uhwi (DECL_FIELD_OFFSET (field))
1529 & - tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1530 else
1531 actual_align = DECL_OFFSET_ALIGN (field);
1532   /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1533 store / extract bit field operations will check the alignment of the
1534 record against the mode of bit fields. */
1536 if (known_align != actual_align)
1537 layout_decl (field, actual_align);
1539 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1540 rli->prev_field = field;
1542 /* Now add size of this field to the size of the record. If the size is
1543 not constant, treat the field as being a multiple of bytes and just
1544 adjust the offset, resetting the bit position. Otherwise, apportion the
1545 size amongst the bit position and offset. First handle the case of an
1546 unspecified size, which can happen when we have an invalid nested struct
1547 definition, such as struct j { struct j { int i; } }. The error message
1548 is printed in finish_struct. */
1549 if (DECL_SIZE (field) == 0)
1550 /* Do nothing. */;
1551 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1552 || TREE_OVERFLOW (DECL_SIZE (field)))
1554 rli->offset
1555 = size_binop (PLUS_EXPR, rli->offset,
1556 fold_convert (sizetype,
1557 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1558 bitsize_unit_node)));
1559 rli->offset
1560 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1561 rli->bitpos = bitsize_zero_node;
1562 rli->offset_align = MIN (rli->offset_align, desired_align);
1564 else if (targetm.ms_bitfield_layout_p (rli->t))
1566 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1568 /* If we ended a bitfield before the full length of the type then
1569 pad the struct out to the full length of the last type. */
1570 if ((DECL_CHAIN (field) == NULL
1571 || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
1572 && DECL_BIT_FIELD_TYPE (field)
1573 && !integer_zerop (DECL_SIZE (field)))
1574 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1575 bitsize_int (rli->remaining_in_alignment));
1577 normalize_rli (rli);
1579 else
1581 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1582 normalize_rli (rli);
1586 /* Assuming that all the fields have been laid out, this function uses
1587 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1588 indicated by RLI. */
1590 static void
1591 finalize_record_size (record_layout_info rli)
1593 tree unpadded_size, unpadded_size_unit;
1595 /* Now we want just byte and bit offsets, so set the offset alignment
1596 to be a byte and then normalize. */
1597 rli->offset_align = BITS_PER_UNIT;
1598 normalize_rli (rli);
1600 /* Determine the desired alignment. */
1601 #ifdef ROUND_TYPE_ALIGN
1602 TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1603 rli->record_align);
1604 #else
1605 TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
1606 #endif
1608 /* Compute the size so far. Be sure to allow for extra bits in the
1609 size in bytes. We have guaranteed above that it will be no more
1610 than a single byte. */
1611 unpadded_size = rli_size_so_far (rli);
1612 unpadded_size_unit = rli_size_unit_so_far (rli);
1613 if (! integer_zerop (rli->bitpos))
1614 unpadded_size_unit
1615 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1617 if (TREE_CODE (unpadded_size_unit) == INTEGER_CST
1618 && !TREE_OVERFLOW (unpadded_size_unit)
1619 && !valid_constant_size_p (unpadded_size_unit))
1620 error ("type %qT is too large", rli->t);
1622 /* Round the size up to be a multiple of the required alignment. */
1623 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1624 TYPE_SIZE_UNIT (rli->t)
1625 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1627 if (TREE_CONSTANT (unpadded_size)
1628 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1629 && input_location != BUILTINS_LOCATION)
1630 warning (OPT_Wpadded, "padding struct size to alignment boundary");
1632 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1633 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1634 && TREE_CONSTANT (unpadded_size))
1636 tree unpacked_size;
1638 #ifdef ROUND_TYPE_ALIGN
1639 rli->unpacked_align
1640 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1641 #else
1642 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1643 #endif
1645 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1646 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1648 if (TYPE_NAME (rli->t))
1650 tree name;
1652 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1653 name = TYPE_NAME (rli->t);
1654 else
1655 name = DECL_NAME (TYPE_NAME (rli->t));
1657 if (STRICT_ALIGNMENT)
1658 warning (OPT_Wpacked, "packed attribute causes inefficient "
1659 "alignment for %qE", name);
1660 else
1661 warning (OPT_Wpacked,
1662 "packed attribute is unnecessary for %qE", name);
1664 else
1666 if (STRICT_ALIGNMENT)
1667 warning (OPT_Wpacked,
1668 "packed attribute causes inefficient alignment");
1669 else
1670 warning (OPT_Wpacked, "packed attribute is unnecessary");
1676 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1678 void
1679 compute_record_mode (tree type)
1681 tree field;
1682 machine_mode mode = VOIDmode;
1684 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1685 However, if possible, we use a mode that fits in a register
1686 instead, in order to allow for better optimization down the
1687 line. */
1688 SET_TYPE_MODE (type, BLKmode);
1690 if (! tree_fits_uhwi_p (TYPE_SIZE (type)))
1691 return;
1693 /* A record which has any BLKmode members must itself be
1694 BLKmode; it can't go in a register. Unless the member is
1695 BLKmode only because it isn't aligned. */
1696 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1698 if (TREE_CODE (field) != FIELD_DECL)
1699 continue;
1701 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1702 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1703 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1704 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1705 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1706 || ! tree_fits_uhwi_p (bit_position (field))
1707 || DECL_SIZE (field) == 0
1708 || ! tree_fits_uhwi_p (DECL_SIZE (field)))
1709 return;
1711 /* If this field is the whole struct, remember its mode so
1712 that, say, we can put a double in a class into a DF
1713 register instead of forcing it to live in the stack. */
1714 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
1715 mode = DECL_MODE (field);
1717 /* With some targets, it is sub-optimal to access an aligned
1718 BLKmode structure as a scalar. */
1719 if (targetm.member_type_forces_blk (field, mode))
1720 return;
1723 /* If we only have one real field, use its mode if that mode's size
1724 matches the type's size. This only applies to RECORD_TYPE; it
1725 does not apply to unions. */
1726 if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
1727 && tree_fits_uhwi_p (TYPE_SIZE (type))
1728 && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type)))
1729 SET_TYPE_MODE (type, mode);
1730 else
1731 SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
1733 /* If the structure's known alignment is less than what the scalar
1734 mode would need, and it matters, then stick with BLKmode. */
1735 if (TYPE_MODE (type) != BLKmode
1736 && STRICT_ALIGNMENT
1737 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1738 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
1740 /* If this is the only reason this type is BLKmode, then
1741 don't force containing types to be BLKmode. */
1742 TYPE_NO_FORCE_BLK (type) = 1;
1743 SET_TYPE_MODE (type, BLKmode);
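/* For example, on a target where double has DFmode,

     struct wd { double d; };

   contains a single field whose DECL_SIZE equals TYPE_SIZE, so the
   loop above records DFmode and the RECORD_TYPE itself ends up with
   DFmode, letting such a wrapper live in a floating-point register.
   A record containing a field whose own type is BLKmode (not merely
   BLKmode because of under-alignment) keeps BLKmode instead.  */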
1747 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1748 out. */
1750 static void
1751 finalize_type_size (tree type)
1753 /* Normally, use the alignment corresponding to the mode chosen.
1754 However, where strict alignment is not required, avoid
1755 over-aligning structures, since most compilers do not do this
1756 alignment. */
1758 if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1759 && (STRICT_ALIGNMENT
1760 || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1761 && TREE_CODE (type) != QUAL_UNION_TYPE
1762 && TREE_CODE (type) != ARRAY_TYPE)))
1764 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1766 /* Don't override a larger alignment requirement coming from a user
1767 alignment of one of the fields. */
1768 if (mode_align >= TYPE_ALIGN (type))
1770 TYPE_ALIGN (type) = mode_align;
1771 TYPE_USER_ALIGN (type) = 0;
1775 /* Do machine-dependent extra alignment. */
1776 #ifdef ROUND_TYPE_ALIGN
1777 TYPE_ALIGN (type)
1778 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1779 #endif
1781 /* If we failed to find a simple way to calculate the unit size
1782 of the type, find it by division. */
1783 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1784 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1785 result will fit in sizetype. We will get more efficient code using
1786 sizetype, so we force a conversion. */
1787 TYPE_SIZE_UNIT (type)
1788 = fold_convert (sizetype,
1789 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1790 bitsize_unit_node));
1792 if (TYPE_SIZE (type) != 0)
1794 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1795 TYPE_SIZE_UNIT (type)
1796 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1799 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1800 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1801 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1802 if (TYPE_SIZE_UNIT (type) != 0
1803 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1804 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1806 /* Also layout any other variants of the type. */
1807 if (TYPE_NEXT_VARIANT (type)
1808 || type != TYPE_MAIN_VARIANT (type))
1810 tree variant;
1811 /* Record layout info of this variant. */
1812 tree size = TYPE_SIZE (type);
1813 tree size_unit = TYPE_SIZE_UNIT (type);
1814 unsigned int align = TYPE_ALIGN (type);
1815 unsigned int precision = TYPE_PRECISION (type);
1816 unsigned int user_align = TYPE_USER_ALIGN (type);
1817 machine_mode mode = TYPE_MODE (type);
1819 /* Copy it into all variants. */
1820 for (variant = TYPE_MAIN_VARIANT (type);
1821 variant != 0;
1822 variant = TYPE_NEXT_VARIANT (variant))
1824 TYPE_SIZE (variant) = size;
1825 TYPE_SIZE_UNIT (variant) = size_unit;
1826 TYPE_ALIGN (variant) = align;
1827 TYPE_PRECISION (variant) = precision;
1828 TYPE_USER_ALIGN (variant) = user_align;
1829 SET_TYPE_MODE (variant, mode);
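/* E.g. once "struct s" has been laid out with TYPE_SIZE == 64 bits and
   TYPE_ALIGN == 32, the loop above copies those values (and the mode,
   precision and user-alignment flag) to "const struct s",
   "volatile struct s" and every other variant, so all variants agree
   with the main variant.  */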
1834 /* Return a new underlying object for a bitfield started with FIELD. */
1836 static tree
1837 start_bitfield_representative (tree field)
1839 tree repr = make_node (FIELD_DECL);
1840 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
1841 /* Force the representative to begin at a BITS_PER_UNIT aligned
1842 boundary - C++ may use tail-padding of a base object to
1843 continue packing bits so the bitfield region does not start
1844 at bit zero (see g++.dg/abi/bitfield5.C for example).
1845 Unallocated bits may happen for other reasons as well,
1846 for example Ada which allows explicit bit-granular structure layout. */
1847 DECL_FIELD_BIT_OFFSET (repr)
1848 = size_binop (BIT_AND_EXPR,
1849 DECL_FIELD_BIT_OFFSET (field),
1850 bitsize_int (~(BITS_PER_UNIT - 1)));
1851 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
1852 DECL_SIZE (repr) = DECL_SIZE (field);
1853 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
1854 DECL_PACKED (repr) = DECL_PACKED (field);
1855 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
1856 return repr;
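/* For instance, if FIELD starts at DECL_FIELD_BIT_OFFSET 11 and
   BITS_PER_UNIT is 8, the representative's bit offset becomes
   11 & ~7 == 8, i.e. the enclosing byte boundary; the byte offset,
   offset alignment and packedness are inherited from FIELD as-is.  */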
1859 /* Finish up a bitfield group that was started by creating the underlying
1860 object REPR with the last field in the bitfield group FIELD. */
1862 static void
1863 finish_bitfield_representative (tree repr, tree field)
1865 unsigned HOST_WIDE_INT bitsize, maxbitsize;
1866 machine_mode mode;
1867 tree nextf, size;
1869 size = size_diffop (DECL_FIELD_OFFSET (field),
1870 DECL_FIELD_OFFSET (repr));
1871 while (TREE_CODE (size) == COMPOUND_EXPR)
1872 size = TREE_OPERAND (size, 1);
1873 gcc_assert (tree_fits_uhwi_p (size));
1874 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
1875 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
1876 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
1877 + tree_to_uhwi (DECL_SIZE (field)));
1879 /* Round up bitsize to multiples of BITS_PER_UNIT. */
1880 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1882 /* Now nothing tells us how to pad out bitsize ... */
1883 nextf = DECL_CHAIN (field);
1884 while (nextf && TREE_CODE (nextf) != FIELD_DECL)
1885 nextf = DECL_CHAIN (nextf);
1886 if (nextf)
1888 tree maxsize;
1889 /* If there was an error, the field may be not laid out
1890 correctly. Don't bother to do anything. */
1891 if (TREE_TYPE (nextf) == error_mark_node)
1892 return;
1893 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
1894 DECL_FIELD_OFFSET (repr));
1895 if (tree_fits_uhwi_p (maxsize))
1897 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
1898 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
1899 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
1900 /* If the group ends within a bitfield, nextf does not need to be
1901 aligned to BITS_PER_UNIT; thus round up. */
1902 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1904 else
1905 maxbitsize = bitsize;
1907 else
1909 /* ??? If you consider that tail-padding of this struct might be
1910 re-used when deriving from it we cannot really do the following
1911 and thus need to set maxsize to bitsize? Also we cannot
1912 generally rely on maxsize to fold to an integer constant, so
1913 use bitsize as fallback for this case. */
1914 tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
1915 DECL_FIELD_OFFSET (repr));
1916 if (tree_fits_uhwi_p (maxsize))
1917 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
1918 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
1919 else
1920 maxbitsize = bitsize;
1923 /* This holds only as long as we do not artificially break up the
1924 representative in the middle of a large bitfield into different,
1925 possibly overlapping, representatives; all representatives start
1926 at a byte offset. */
1927 gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
1929 /* Find the smallest nice mode to use. */
1930 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1931 mode = GET_MODE_WIDER_MODE (mode))
1932 if (GET_MODE_BITSIZE (mode) >= bitsize)
1933 break;
1934 if (mode != VOIDmode
1935 && (GET_MODE_BITSIZE (mode) > maxbitsize
1936 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
1937 mode = VOIDmode;
1939 if (mode == VOIDmode)
1941 /* We really want a BLKmode representative only as a last resort,
1942 considering the member b in
1943 struct { int a : 7; int b : 17; int c; } __attribute__((packed));
1944 Otherwise we simply want to split the representative up
1945 allowing for overlaps within the bitfield region as required for
1946 struct { int a : 7; int b : 7;
1947 int c : 10; int d; } __attribute__((packed));
1948 [0, 15] HImode for a and b, [8, 23] HImode for c. */
1949 DECL_SIZE (repr) = bitsize_int (bitsize);
1950 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
1951 DECL_MODE (repr) = BLKmode;
1952 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
1953 bitsize / BITS_PER_UNIT);
1955 else
1957 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
1958 DECL_SIZE (repr) = bitsize_int (modesize);
1959 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
1960 DECL_MODE (repr) = mode;
1961 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
1964 /* Remember whether the bitfield group is at the end of the
1965 structure or not. */
1966 DECL_CHAIN (repr) = nextf;
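/* Worked example (32-bit int, BITS_PER_UNIT == 8, no packing): for

     struct { int a : 7; int b : 7; int c : 10; int d; };

   the group a/b/c is finished with FIELD == c, which sits at bit
   offset 14 with size 10, so bitsize == 24.  The following field d
   starts at bit 32, giving maxbitsize == 32; the narrowest integer
   mode of at least 24 bits is SImode (32 bits), which fits, so a, b
   and c share a single 32-bit SImode representative at byte 0.  */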
1969 /* Compute and set FIELD_DECLs for the underlying objects we should
1970 use for bitfield access for the structure T. */
1972 void
1973 finish_bitfield_layout (tree t)
1975 tree field, prev;
1976 tree repr = NULL_TREE;
1978 /* Unions would be special: for the ease of type-punning optimizations
1979 we could use the underlying type as a hint for the representative,
1980 if the bitfield would fit and the representative would not exceed
1981 the union in size. */
1982 if (TREE_CODE (t) != RECORD_TYPE)
1983 return;
1985 for (prev = NULL_TREE, field = TYPE_FIELDS (t);
1986 field; field = DECL_CHAIN (field))
1988 if (TREE_CODE (field) != FIELD_DECL)
1989 continue;
1991 /* In the C++ memory model, consecutive bit fields in a structure are
1992 considered one memory location and updating a memory location
1993 may not store into adjacent memory locations. */
1994 if (!repr
1995 && DECL_BIT_FIELD_TYPE (field))
1997 /* Start new representative. */
1998 repr = start_bitfield_representative (field);
2000 else if (repr
2001 && ! DECL_BIT_FIELD_TYPE (field))
2003 /* Finish off new representative. */
2004 finish_bitfield_representative (repr, prev);
2005 repr = NULL_TREE;
2007 else if (DECL_BIT_FIELD_TYPE (field))
2009 gcc_assert (repr != NULL_TREE);
2011 /* Zero-size bitfields finish off a representative and
2012 do not have a representative themselves. This is
2013 required by the C++ memory model. */
2014 if (integer_zerop (DECL_SIZE (field)))
2016 finish_bitfield_representative (repr, prev);
2017 repr = NULL_TREE;
2020 /* We assume that the DECL_FIELD_OFFSETs of the representative and
2021 of each bitfield member are either both constants or equal to each other.
2022 This is because we need to be able to compute the bit-offset
2023 of each field relative to the representative in get_bit_range
2024 during RTL expansion.
2025 If these constraints are not met, simply force a new
2026 representative to be generated. That will at most
2027 generate worse code but still maintain correctness with
2028 respect to the C++ memory model. */
2029 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
2030 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
2031 || operand_equal_p (DECL_FIELD_OFFSET (repr),
2032 DECL_FIELD_OFFSET (field), 0)))
2034 finish_bitfield_representative (repr, prev);
2035 repr = start_bitfield_representative (field);
2038 else
2039 continue;
2041 if (repr)
2042 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
2044 prev = field;
2047 if (repr)
2048 finish_bitfield_representative (repr, prev);
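/* For instance, in

     struct { int a : 3; int b : 5; int : 0; int c : 4; };

   the zero-sized bitfield finishes the first group, so a and b share
   one representative while c starts a new one; under the C++ memory
   model a store to c must then not touch the storage of a and b.  */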
2051 /* Do all of the work required to layout the type indicated by RLI,
2052 once the fields have been laid out. This function will call `free'
2053 for RLI, unless FREE_P is false. Passing a value other than false
2054 for FREE_P is bad practice; this option only exists to support the
2055 G++ 3.2 ABI. */
2057 void
2058 finish_record_layout (record_layout_info rli, int free_p)
2060 tree variant;
2062 /* Compute the final size. */
2063 finalize_record_size (rli);
2065 /* Compute the TYPE_MODE for the record. */
2066 compute_record_mode (rli->t);
2068 /* Perform any last tweaks to the TYPE_SIZE, etc. */
2069 finalize_type_size (rli->t);
2071 /* Compute bitfield representatives. */
2072 finish_bitfield_layout (rli->t);
2074 /* Propagate TYPE_PACKED to variants. With C++ templates,
2075 handle_packed_attribute is too early to do this. */
2076 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2077 variant = TYPE_NEXT_VARIANT (variant))
2078 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2080 /* Lay out any static members. This is done now because their type
2081 may use the record's type. */
2082 while (!vec_safe_is_empty (rli->pending_statics))
2083 layout_decl (rli->pending_statics->pop (), 0);
2085 /* Clean up. */
2086 if (free_p)
2088 vec_free (rli->pending_statics);
2089 free (rli);
2094 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
2095 NAME; its fields are chained in reverse on FIELDS.
2097 If ALIGN_TYPE is non-null, it is given the same alignment as
2098 ALIGN_TYPE. */
2100 void
2101 finish_builtin_struct (tree type, const char *name, tree fields,
2102 tree align_type)
2104 tree tail, next;
2106 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2108 DECL_FIELD_CONTEXT (fields) = type;
2109 next = DECL_CHAIN (fields);
2110 DECL_CHAIN (fields) = tail;
2112 TYPE_FIELDS (type) = tail;
2114 if (align_type)
2116 TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
2117 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2120 layout_type (type);
2121 #if 0 /* not yet, should get fixed properly later */
2122 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2123 #else
2124 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2125 TYPE_DECL, get_identifier (name), type);
2126 #endif
2127 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2128 layout_decl (TYPE_NAME (type), 0);
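/* A sketch of a typical use from a front end (the field and type names
   here are hypothetical, not part of GCC):

     tree t = make_node (RECORD_TYPE);
     tree lo = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			   get_identifier ("lo"), unsigned_type_node);
     tree hi = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			   get_identifier ("hi"), unsigned_type_node);
     DECL_CHAIN (hi) = lo;
     finish_builtin_struct (t, "__example_pair", hi, NULL_TREE);

   Because FIELDS is chained in reverse, passing hi->lo produces a
   record whose TYPE_FIELDS list is lo followed by hi.  */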
2131 /* Calculate the mode, size, and alignment for TYPE.
2132 For an array type, calculate the element separation as well.
2133 Record TYPE on the chain of permanent or temporary types
2134 so that dbxout will find out about it.
2136 TYPE_SIZE of a type is nonzero if the type has been laid out already.
2137 layout_type does nothing on such a type.
2139 If the type is incomplete, its TYPE_SIZE remains zero. */
2141 void
2142 layout_type (tree type)
2144 gcc_assert (type);
2146 if (type == error_mark_node)
2147 return;
2149 /* Do nothing if type has been laid out before. */
2150 if (TYPE_SIZE (type))
2151 return;
2153 switch (TREE_CODE (type))
2155 case LANG_TYPE:
2156 /* This kind of type is the responsibility
2157 of the language-specific code. */
2158 gcc_unreachable ();
2160 case BOOLEAN_TYPE:
2161 case INTEGER_TYPE:
2162 case ENUMERAL_TYPE:
2163 SET_TYPE_MODE (type,
2164 smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
2165 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2166 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */
2167 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2168 break;
2170 case REAL_TYPE:
2171 SET_TYPE_MODE (type,
2172 mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
2173 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2174 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2175 break;
2177 case FIXED_POINT_TYPE:
2178 /* TYPE_MODE (type) has been set already. */
2179 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2180 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2181 break;
2183 case COMPLEX_TYPE:
2184 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2185 SET_TYPE_MODE (type,
2186 mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
2187 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2188 ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
2189 0));
2190 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2191 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2192 break;
2194 case VECTOR_TYPE:
2196 int nunits = TYPE_VECTOR_SUBPARTS (type);
2197 tree innertype = TREE_TYPE (type);
2199 gcc_assert (!(nunits & (nunits - 1)));
2201 /* Find an appropriate mode for the vector type. */
2202 if (TYPE_MODE (type) == VOIDmode)
2203 SET_TYPE_MODE (type,
2204 mode_for_vector (TYPE_MODE (innertype), nunits));
2206 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2207 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2208 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2209 TYPE_SIZE_UNIT (innertype),
2210 size_int (nunits));
2211 TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
2212 bitsize_int (nunits));
2214 /* For vector types, we do not default to the mode's alignment.
2215 Instead, query a target hook, defaulting to natural alignment.
2216 This prevents ABI changes depending on whether or not native
2217 vector modes are supported. */
2218 TYPE_ALIGN (type) = targetm.vector_alignment (type);
2220 /* However, if the underlying mode requires a bigger alignment than
2221 what the target hook provides, we cannot use the mode. For now,
2222 simply reject that case. */
2223 gcc_assert (TYPE_ALIGN (type)
2224 >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2225 break;
2228 case VOID_TYPE:
2229 /* This is an incomplete type and so doesn't have a size. */
2230 TYPE_ALIGN (type) = 1;
2231 TYPE_USER_ALIGN (type) = 0;
2232 SET_TYPE_MODE (type, VOIDmode);
2233 break;
2235 case OFFSET_TYPE:
2236 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2237 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
2238 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
2239 integral, which may be an __intN. */
2240 SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
2241 TYPE_PRECISION (type) = POINTER_SIZE;
2242 break;
2244 case FUNCTION_TYPE:
2245 case METHOD_TYPE:
2246 /* It's hard to see what the mode and size of a function ought to
2247 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2248 make it consistent with that. */
2249 SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
2250 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2251 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2252 break;
2254 case POINTER_TYPE:
2255 case REFERENCE_TYPE:
2257 machine_mode mode = TYPE_MODE (type);
2258 if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
2260 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
2261 mode = targetm.addr_space.address_mode (as);
2264 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2265 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2266 TYPE_UNSIGNED (type) = 1;
2267 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
2269 break;
2271 case ARRAY_TYPE:
2273 tree index = TYPE_DOMAIN (type);
2274 tree element = TREE_TYPE (type);
2276 build_pointer_type (element);
2278 /* We need to know both bounds in order to compute the size. */
2279 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2280 && TYPE_SIZE (element))
2282 tree ub = TYPE_MAX_VALUE (index);
2283 tree lb = TYPE_MIN_VALUE (index);
2284 tree element_size = TYPE_SIZE (element);
2285 tree length;
2287 /* Make sure that an array of zero-sized element is zero-sized
2288 regardless of its extent. */
2289 if (integer_zerop (element_size))
2290 length = size_zero_node;
2292 /* The computation should happen in the original signedness so
2293 that (possible) negative values are handled appropriately
2294 when determining overflow. */
2295 else
2297 /* ??? When it is obvious that the range is signed
2298 represent it using ssizetype. */
2299 if (TREE_CODE (lb) == INTEGER_CST
2300 && TREE_CODE (ub) == INTEGER_CST
2301 && TYPE_UNSIGNED (TREE_TYPE (lb))
2302 && tree_int_cst_lt (ub, lb))
2304 lb = wide_int_to_tree (ssizetype,
2305 offset_int::from (lb, SIGNED));
2306 ub = wide_int_to_tree (ssizetype,
2307 offset_int::from (ub, SIGNED));
2309 length
2310 = fold_convert (sizetype,
2311 size_binop (PLUS_EXPR,
2312 build_int_cst (TREE_TYPE (lb), 1),
2313 size_binop (MINUS_EXPR, ub, lb)));
2316 /* ??? We have no way to distinguish a null-sized array from an
2317 array spanning the whole sizetype range, so we arbitrarily
2318 decide that [0, -1] is the only valid representation. */
2319 if (integer_zerop (length)
2320 && TREE_OVERFLOW (length)
2321 && integer_zerop (lb))
2322 length = size_zero_node;
2324 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2325 fold_convert (bitsizetype,
2326 length));
2328 /* If we know the size of the element, calculate the total size
2329 directly, rather than do some division thing below. This
2330 optimization helps Fortran assumed-size arrays (where the
2331 size of the array is determined at runtime) substantially. */
2332 if (TYPE_SIZE_UNIT (element))
2333 TYPE_SIZE_UNIT (type)
2334 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2337 /* Now round the alignment and size,
2338 using machine-dependent criteria if any. */
2340 #ifdef ROUND_TYPE_ALIGN
2341 TYPE_ALIGN (type)
2342 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
2343 #else
2344 TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
2345 #endif
2346 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2347 SET_TYPE_MODE (type, BLKmode);
2348 if (TYPE_SIZE (type) != 0
2349 && ! targetm.member_type_forces_blk (type, VOIDmode)
2350 /* BLKmode elements force BLKmode aggregate;
2351 else extract/store fields may lose. */
2352 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2353 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2355 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2356 TYPE_SIZE (type)));
2357 if (TYPE_MODE (type) != BLKmode
2358 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2359 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2361 TYPE_NO_FORCE_BLK (type) = 1;
2362 SET_TYPE_MODE (type, BLKmode);
2365 /* When the element size is constant, check that it is at least as
2366 large as the element alignment. */
2367 if (TYPE_SIZE_UNIT (element)
2368 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2369 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2370 TYPE_ALIGN_UNIT. */
2371 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2372 && !integer_zerop (TYPE_SIZE_UNIT (element))
2373 && compare_tree_int (TYPE_SIZE_UNIT (element),
2374 TYPE_ALIGN_UNIT (element)) < 0)
2375 error ("alignment of array elements is greater than element size");
2376 break;
2379 case RECORD_TYPE:
2380 case UNION_TYPE:
2381 case QUAL_UNION_TYPE:
2383 tree field;
2384 record_layout_info rli;
2386 /* Initialize the layout information. */
2387 rli = start_record_layout (type);
2389 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2390 in the reverse order in building the COND_EXPR that denotes
2391 its size. We reverse them again later. */
2392 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2393 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2395 /* Place all the fields. */
2396 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2397 place_field (rli, field);
2399 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2400 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2402 /* Finish laying out the record. */
2403 finish_record_layout (rli, /*free_p=*/true);
2405 break;
2407 default:
2408 gcc_unreachable ();
2411 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2412 records and unions, finish_record_layout already called this
2413 function. */
2414 if (TREE_CODE (type) != RECORD_TYPE
2415 && TREE_CODE (type) != UNION_TYPE
2416 && TREE_CODE (type) != QUAL_UNION_TYPE)
2417 finalize_type_size (type);
2419 /* We should never see alias sets on incomplete aggregates. And we
2420 should not call layout_type on already-complete aggregates. */
2421 if (AGGREGATE_TYPE_P (type))
2422 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
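/* As an example of the ARRAY_TYPE case above: for "int a[10]" with a
   32-bit int, the domain is [0, 9], the length works out to
   9 - 0 + 1 == 10, so TYPE_SIZE becomes 320 bits and TYPE_SIZE_UNIT
   40 bytes, while the alignment and TYPE_USER_ALIGN come from the
   element type.  */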
2425 /* Return the least alignment required for type TYPE. */
2427 unsigned int
2428 min_align_of_type (tree type)
2430 unsigned int align = TYPE_ALIGN (type);
2431 align = MIN (align, BIGGEST_ALIGNMENT);
2432 if (!TYPE_USER_ALIGN (type))
2434 #ifdef BIGGEST_FIELD_ALIGNMENT
2435 align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2436 #endif
2437 unsigned int field_align = align;
2438 #ifdef ADJUST_FIELD_ALIGN
2439 tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE, type);
2440 field_align = ADJUST_FIELD_ALIGN (field, field_align);
2441 ggc_free (field);
2442 #endif
2443 align = MIN (align, field_align);
2445 return align / BITS_PER_UNIT;
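/* For example, if TYPE_ALIGN (double_type_node) is 64 bits but the
   target's ADJUST_FIELD_ALIGN caps a double field at 32 bits, this
   returns 4 rather than 8: the alignment a double is guaranteed to
   have anywhere, including as a structure member.  */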
2448 /* Vector types need to re-check the target flags each time we report
2449 the machine mode. We need to do this because attribute target can
2450 change the result of vector_mode_supported_p and have_regs_of_mode
2451 on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
2452 change on a per-function basis. */
2453 /* ??? Possibly a better solution is to run through all the types
2454 referenced by a function and re-compute the TYPE_MODE once, rather
2455 than make the TYPE_MODE macro call a function. */
2457 machine_mode
2458 vector_type_mode (const_tree t)
2460 machine_mode mode;
2462 gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
2464 mode = t->type_common.mode;
2465 if (VECTOR_MODE_P (mode)
2466 && (!targetm.vector_mode_supported_p (mode)
2467 || !have_regs_of_mode[mode]))
2469 machine_mode innermode = TREE_TYPE (t)->type_common.mode;
2471 /* For integers, try mapping it to a same-sized scalar mode. */
2472 if (GET_MODE_CLASS (innermode) == MODE_INT)
2474 mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
2475 * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
2477 if (mode != VOIDmode && have_regs_of_mode[mode])
2478 return mode;
2481 return BLKmode;
2484 return mode;
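/* For example, if the current function's target attribute disables the
   vector ISA so that a 4 x SImode vector's V4SImode is no longer
   usable, the code above re-maps the type to the 128-bit integer mode
   when such a mode exists and has registers, and to BLKmode
   otherwise.  */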
2487 /* Create and return a type for signed integers of PRECISION bits. */
2489 tree
2490 make_signed_type (int precision)
2492 tree type = make_node (INTEGER_TYPE);
2494 TYPE_PRECISION (type) = precision;
2496 fixup_signed_type (type);
2497 return type;
2500 /* Create and return a type for unsigned integers of PRECISION bits. */
2502 tree
2503 make_unsigned_type (int precision)
2505 tree type = make_node (INTEGER_TYPE);
2507 TYPE_PRECISION (type) = precision;
2509 fixup_unsigned_type (type);
2510 return type;
2513 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2514 and SATP. */
2516 tree
2517 make_fract_type (int precision, int unsignedp, int satp)
2519 tree type = make_node (FIXED_POINT_TYPE);
2521 TYPE_PRECISION (type) = precision;
2523 if (satp)
2524 TYPE_SATURATING (type) = 1;
2526 /* Lay out the type: set its alignment, size, etc. */
2527 if (unsignedp)
2529 TYPE_UNSIGNED (type) = 1;
2530 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
2532 else
2533 SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
2534 layout_type (type);
2536 return type;
2539 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2540 and SATP. */
2542 tree
2543 make_accum_type (int precision, int unsignedp, int satp)
2545 tree type = make_node (FIXED_POINT_TYPE);
2547 TYPE_PRECISION (type) = precision;
2549 if (satp)
2550 TYPE_SATURATING (type) = 1;
2552 /* Lay out the type: set its alignment, size, etc. */
2553 if (unsignedp)
2555 TYPE_UNSIGNED (type) = 1;
2556 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2558 else
2559 SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
2560 layout_type (type);
2562 return type;
2565 /* Initialize sizetypes so layout_type can use them. */
2567 void
2568 initialize_sizetypes (void)
2570 int precision, bprecision;
2572 /* Get sizetypes precision from the SIZE_TYPE target macro. */
2573 if (strcmp (SIZETYPE, "unsigned int") == 0)
2574 precision = INT_TYPE_SIZE;
2575 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2576 precision = LONG_TYPE_SIZE;
2577 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2578 precision = LONG_LONG_TYPE_SIZE;
2579 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2580 precision = SHORT_TYPE_SIZE;
2581 else
2583 int i;
2585 precision = -1;
2586 for (i = 0; i < NUM_INT_N_ENTS; i++)
2587 if (int_n_enabled_p[i])
2589 char name[50];
2590 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2592 if (strcmp (name, SIZETYPE) == 0)
2594 precision = int_n_data[i].bitsize;
2597 if (precision == -1)
2598 gcc_unreachable ();
2601 bprecision
2602 = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
2603 bprecision
2604 = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
2605 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2606 bprecision = HOST_BITS_PER_DOUBLE_INT;
2608 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2609 sizetype = make_node (INTEGER_TYPE);
2610 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2611 TYPE_PRECISION (sizetype) = precision;
2612 TYPE_UNSIGNED (sizetype) = 1;
2613 bitsizetype = make_node (INTEGER_TYPE);
2614 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2615 TYPE_PRECISION (bitsizetype) = bprecision;
2616 TYPE_UNSIGNED (bitsizetype) = 1;
2618 /* Now layout both types manually. */
2619 SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
2620 TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
2621 TYPE_SIZE (sizetype) = bitsize_int (precision);
2622 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
2623 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2625 SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
2626 TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
2627 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2628 TYPE_SIZE_UNIT (bitsizetype)
2629 = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
2630 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2632 /* Create the signed variants of *sizetype. */
2633 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2634 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2635 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2636 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
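/* For instance, with a 64-bit "long unsigned int" SIZETYPE and
   BITS_PER_UNIT_LOG == 3, bitsizetype needs 64 + 3 + 1 == 68 bits,
   which is then widened to the precision of the smallest integer mode
   that holds it (128 bits where TImode exists) and capped at
   HOST_BITS_PER_DOUBLE_INT.  */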
2639 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2640 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2641 for TYPE, based on PRECISION and the signedness SGN.
2642 PRECISION need not correspond to a width supported
2643 natively by the hardware; for example, on a machine with 8-bit,
2644 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2645 61. */
2647 void
2648 set_min_and_max_values_for_integral_type (tree type,
2649 int precision,
2650 signop sgn)
2652 /* For bitfields with zero width we end up creating integer types
2653 with zero precision. Don't assign any minimum/maximum values
2654 to those types, they don't have any valid value. */
2655 if (precision < 1)
2656 return;
2658 TYPE_MIN_VALUE (type)
2659 = wide_int_to_tree (type, wi::min_value (precision, sgn));
2660 TYPE_MAX_VALUE (type)
2661 = wide_int_to_tree (type, wi::max_value (precision, sgn));
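/* E.g. a signed type of precision 7 gets TYPE_MIN_VALUE -64 and
   TYPE_MAX_VALUE 63, while an unsigned 7-bit type gets 0 and 127,
   independently of how wide the underlying mode happens to be.  */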
2664 /* Set the extreme values of TYPE based on its precision in bits,
2665 then lay it out. Used when make_signed_type won't do
2666 because the tree code is not INTEGER_TYPE.
2667 E.g. for Pascal, when the -fsigned-char option is given. */
2669 void
2670 fixup_signed_type (tree type)
2672 int precision = TYPE_PRECISION (type);
2674 set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2676 /* Lay out the type: set its alignment, size, etc. */
2677 layout_type (type);
2680 /* Set the extreme values of TYPE based on its precision in bits,
2681 then lay it out. This is used both in `make_unsigned_type'
2682 and for enumeral types. */
2684 void
2685 fixup_unsigned_type (tree type)
2687 int precision = TYPE_PRECISION (type);
2689 TYPE_UNSIGNED (type) = 1;
2691 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2693 /* Lay out the type: set its alignment, size, etc. */
2694 layout_type (type);
2697 /* Construct an iterator for a bitfield that spans BITSIZE bits,
2698 starting at BITPOS.
2700 BITREGION_START is the bit position of the first bit in this
2701 sequence of bit fields. BITREGION_END is the last bit in this
2702 sequence. If these two fields are non-zero, we should restrict the
2703 memory access to that range. Otherwise, we are allowed to touch
2704 any adjacent non bit-fields.
2706 ALIGN is the alignment of the underlying object in bits.
2707 VOLATILEP says whether the bitfield is volatile. */
2709 bit_field_mode_iterator
2710 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
2711 HOST_WIDE_INT bitregion_start,
2712 HOST_WIDE_INT bitregion_end,
2713 unsigned int align, bool volatilep)
2714 : m_mode (GET_CLASS_NARROWEST_MODE (MODE_INT)), m_bitsize (bitsize),
2715 m_bitpos (bitpos), m_bitregion_start (bitregion_start),
2716 m_bitregion_end (bitregion_end), m_align (align),
2717 m_volatilep (volatilep), m_count (0)
2719 if (!m_bitregion_end)
2721 /* We can assume that any aligned chunk of ALIGN bits that overlaps
2722 the bitfield is mapped and won't trap, provided that ALIGN isn't
2723 too large. The cap is the biggest required alignment for data,
2724 or at least the word size; force at least one such chunk. */
2725 unsigned HOST_WIDE_INT units
2726 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
2727 if (bitsize <= 0)
2728 bitsize = 1;
2729 m_bitregion_end = bitpos + bitsize + units - 1;
2730 m_bitregion_end -= m_bitregion_end % units + 1;
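/* For instance, with ALIGN == 32, BITPOS == 10, BITSIZE == 5 and no
   explicit bit region, units is 32 on a typical target, and the
   computation above yields m_bitregion_end == 31: the access may
   touch the whole aligned 32-bit chunk [0, 31] containing the
   bitfield, but nothing beyond it.  */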
2734 /* Calls to this function return successively larger modes that can be used
2735 to represent the bitfield. Return true if another bitfield mode is
2736 available, storing it in *OUT_MODE if so. */
2738 bool
2739 bit_field_mode_iterator::next_mode (machine_mode *out_mode)
2741 for (; m_mode != VOIDmode; m_mode = GET_MODE_WIDER_MODE (m_mode))
2743 unsigned int unit = GET_MODE_BITSIZE (m_mode);
2745 /* Skip modes that don't have full precision. */
2746 if (unit != GET_MODE_PRECISION (m_mode))
2747 continue;
2749 /* Stop if the mode is too wide to handle efficiently. */
2750 if (unit > MAX_FIXED_MODE_SIZE)
2751 break;
2753 /* Don't deliver more than one multiword mode; the smallest one
2754 should be used. */
2755 if (m_count > 0 && unit > BITS_PER_WORD)
2756 break;
2758 /* Skip modes that are too small. */
2759 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
2760 unsigned HOST_WIDE_INT subend = substart + m_bitsize;
2761 if (subend > unit)
2762 continue;
2764 /* Stop if the mode goes outside the bitregion. */
2765 HOST_WIDE_INT start = m_bitpos - substart;
2766 if (m_bitregion_start && start < m_bitregion_start)
2767 break;
2768 HOST_WIDE_INT end = start + unit;
2769 if (end > m_bitregion_end + 1)
2770 break;
2772 /* Stop if the mode requires too much alignment. */
2773 if (GET_MODE_ALIGNMENT (m_mode) > m_align
2774 && SLOW_UNALIGNED_ACCESS (m_mode, m_align))
2775 break;
2777 *out_mode = m_mode;
2778 m_mode = GET_MODE_WIDER_MODE (m_mode);
2779 m_count++;
2780 return true;
2782 return false;
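/* Example: for a bitfield with BITPOS == 3 and BITSIZE == 7, QImode is
   skipped because the field does not fit in one byte (3 + 7 > 8), so
   the first mode returned is HImode, then SImode and so on, until the
   bit region, MAX_FIXED_MODE_SIZE or the alignment test stops the
   iteration.  */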
2785 /* Return true if smaller modes are generally preferred for this kind
2786 of bitfield. */
2788 bool
2789 bit_field_mode_iterator::prefer_smaller_modes ()
2791 return (m_volatilep
2792 ? targetm.narrow_volatile_bitfield ()
2793 : !SLOW_BYTE_ACCESS);
2796 /* Find the best machine mode to use when referencing a bit field of length
2797 BITSIZE bits starting at BITPOS.
2799 BITREGION_START is the bit position of the first bit in this
2800 sequence of bit fields. BITREGION_END is the last bit in this
2801 sequence. If these two fields are non-zero, we should restrict the
2802 memory access to that range. Otherwise, we are allowed to touch
2803 any adjacent non bit-fields.
2805 The underlying object is known to be aligned to a boundary of ALIGN bits.
2806 If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2807 larger than LARGEST_MODE (usually SImode).
2809 If no mode meets all these conditions, we return VOIDmode.
2811 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2812 smallest mode meeting these conditions.
2814 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2815 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2816 all the conditions.
2818 If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
2819 decide which of the above modes should be used. */
2821 machine_mode
2822 get_best_mode (int bitsize, int bitpos,
2823 unsigned HOST_WIDE_INT bitregion_start,
2824 unsigned HOST_WIDE_INT bitregion_end,
2825 unsigned int align,
2826 machine_mode largest_mode, bool volatilep)
2828 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
2829 bitregion_end, align, volatilep);
2830 machine_mode widest_mode = VOIDmode;
2831 machine_mode mode;
2832 while (iter.next_mode (&mode)
2833 /* ??? For historical reasons, reject modes that would normally
2834 receive greater alignment, even if unaligned accesses are
2835 acceptable. This has both advantages and disadvantages.
2836 Removing this check means that something like:
2838 struct s { unsigned int x; unsigned int y; };
2839 int f (struct s *s) { return s->x == 0 && s->y == 0; }
2841 can be implemented using a single load and compare on
2842 64-bit machines that have no alignment restrictions.
2843 For example, on powerpc64-linux-gnu, we would generate:
2845 ld 3,0(3)
2846 cntlzd 3,3
2847 srdi 3,3,6
2850 rather than:
2852 lwz 9,0(3)
2853 cmpwi 7,9,0
2854 bne 7,.L3
2855 lwz 3,4(3)
2856 cntlzw 3,3
2857 srwi 3,3,5
2858 extsw 3,3
2860 .p2align 4,,15
2861 .L3:
2862 li 3,0
2865 However, accessing more than one field can make life harder
2866 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c
2867 has a series of unsigned short copies followed by a series of
2868 unsigned short comparisons. With this check, both the copies
2869 and comparisons remain 16-bit accesses and FRE is able
2870 to eliminate the latter. Without the check, the comparisons
2871 can be done using 2 64-bit operations, which FRE isn't able
2872 to handle in the same way.
2874 Either way, it would probably be worth disabling this check
2875 during expand. One particular example where removing the
2876 check would help is the get_best_mode call in store_bit_field.
2877 If we are given a memory bitregion of 128 bits that is aligned
2878 to a 64-bit boundary, and the bitfield we want to modify is
2879 in the second half of the bitregion, this check causes
2880 store_bitfield to turn the memory into a 64-bit reference
2881 to the _first_ half of the region. We later use
2882 adjust_bitfield_address to get a reference to the correct half,
2883 but doing so looks to adjust_bitfield_address as though we are
2884 moving past the end of the original object, so it drops the
2885 associated MEM_EXPR and MEM_OFFSET. Removing the check
2886 causes store_bit_field to keep a 128-bit memory reference,
2887 so that the final bitfield reference still has a MEM_EXPR
2888 and MEM_OFFSET. */
2889 && GET_MODE_ALIGNMENT (mode) <= align
2890 && (largest_mode == VOIDmode
2891 || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode)))
2893 widest_mode = mode;
2894 if (iter.prefer_smaller_modes ())
2895 break;
2897 return widest_mode;
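/* A sketch of a typical call from bitfield expansion code (the
   caller-side variables here are hypothetical):

     machine_mode best
       = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
			MEM_ALIGN (mem), word_mode, MEM_VOLATILE_P (mem));

   with a fall-back to byte-wise access when VOIDmode is returned.  */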
2900 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
2901 SIGN). The returned constants are made to be usable in TARGET_MODE. */
2903 void
2904 get_mode_bounds (machine_mode mode, int sign,
2905 machine_mode target_mode,
2906 rtx *mmin, rtx *mmax)
2908 unsigned size = GET_MODE_PRECISION (mode);
2909 unsigned HOST_WIDE_INT min_val, max_val;
2911 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2913 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */
2914 if (mode == BImode)
2916 if (STORE_FLAG_VALUE < 0)
2918 min_val = STORE_FLAG_VALUE;
2919 max_val = 0;
2921 else
2923 min_val = 0;
2924 max_val = STORE_FLAG_VALUE;
2927 else if (sign)
2929 min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2930 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2932 else
2934 min_val = 0;
2935 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2938 *mmin = gen_int_mode (min_val, target_mode);
2939 *mmax = gen_int_mode (max_val, target_mode);
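/* E.g. get_mode_bounds (QImode, 1, SImode, &mmin, &mmax) yields the
   SImode constants -128 and 127, while passing 0 for SIGN yields
   0 and 255.  */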
2942 #include "gt-stor-layout.h"