/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
#include "cgraph.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "gimple.h"
/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.  Set only by
   internal_reference_types called only by a front end.  */
static int reference_types_internal = 0;
static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);
/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}
/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;
/* Look inside EXPR into simple arithmetic operations involving constants.
   Return the outermost non-arithmetic or non-constant node.  */

static tree
skip_simple_constant_arithmetic (tree expr)
{
  while (true)
    {
      if (UNARY_CLASS_P (expr))
        expr = TREE_OPERAND (expr, 0);
      else if (BINARY_CLASS_P (expr))
        {
          if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
            expr = TREE_OPERAND (expr, 0);
          else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
            expr = TREE_OPERAND (expr, 1);
          else
            break;
        }
      else
        break;
    }

  return expr;
}
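/* For illustration (not part of the original source): given a tree for
   "n * 4 + 2", both the outer PLUS_EXPR and the inner MULT_EXPR have one
   constant operand, so the walk above descends through both of them and
   returns the tree for "n" itself.  */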
/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      for (inner = TREE_OPERAND (*tp, 0);
           REFERENCE_CLASS_P (inner);
           inner = TREE_OPERAND (inner, 0))
        ;

      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
        {
          *walk_subtrees = 0;
          return NULL_TREE;
        }
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}
/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR)
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
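/* A hedged sketch of the transformation above (not from the original
   source): for a record whose size expression references one of its own
   fields D through a PLACEHOLDER_EXPR, something like

       size = (PLACEHOLDER_EXPR).D * 8

   is factored into an artificial "const" function plus a call, roughly

       static inline sizetype SZ0 (sizetype p0) { return p0 * 8; }
       ... SZ0 ((PLACEHOLDER_EXPR).D) ...

   so the placeholder is only evaluated at each call site.  The names SZ0
   and p0 mirror the sprintf formats used above; the actual trees built
   here differ in detail.  */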
/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);
      gimplify_function_tree (fndecl);
      dump_function (TDI_generic, fndecl);
      cgraph_finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}
/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class MCLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes of wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
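/* Illustrative only: on a typical target with 8-bit units and the integer
   mode progression QImode/HImode/SImode/DImode, mode_for_size (32,
   MODE_INT, 0) returns SImode, while mode_for_size (24, MODE_INT, 0)
   returns BLKmode because no integer mode there has exactly 24 value
   bits.  */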
/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, mclass, limit);
}
/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
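/* Illustrative only: with the same QI/HI/SI/DI progression,
   smallest_mode_for_size (17, MODE_INT) returns SImode, the narrowest
   mode with at least 17 value bits.  Unlike mode_for_size there is no
   BLKmode fallback, so requesting more bits than the widest mode
   provides aborts.  */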
/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
        break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
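/* Illustrative only: on a target where SFmode is 32 bits wide,
   int_mode_for_mode (SFmode) returns SImode; int_mode_for_mode (BLKmode)
   returns BLKmode unchanged, per the MODE_RANDOM case above.  */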
/* Find a mode that is suitable for representing a vector with
   NUNITS elements of mode INNERMODE.  Returns BLKmode if there
   is no suitable mode.  */

enum machine_mode
mode_for_vector (enum machine_mode innermode, unsigned nunits)
{
  enum machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_NUNITS (mode) == nunits
        && GET_MODE_INNER (mode) == innermode)
      break;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (mode == VOIDmode
      && GET_MODE_CLASS (innermode) == MODE_INT)
    mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
                          MODE_INT, 0);

  if (mode == VOIDmode
      || (GET_MODE_CLASS (mode) == MODE_INT
          && !have_regs_of_mode[mode]))
    return BLKmode;

  return mode;
}
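/* Illustrative only: mode_for_vector (SImode, 4) returns V4SImode on
   targets that define such a vector mode.  If none exists, the integer
   fallback above asks for a 128-bit scalar integer mode instead, which
   is kept only when the target can hold it in registers; otherwise the
   result is BLKmode.  */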
/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}
/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static enum machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  unsigned HOST_WIDE_INT int_size, int_elem_size;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (host_integerp (size, 1) && host_integerp (elem_size, 1))
    {
      int_size = tree_low_cst (size, 1);
      int_elem_size = tree_low_cst (elem_size, 1);
      if (int_elem_size > 0
          && int_size % int_elem_size == 0
          && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
                                             int_size / int_elem_size))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p);
}
/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}
/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
#endif
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary,
             and not -fstrict-volatile-bitfields.  If the latter is set,
             we unfortunately can't check TREE_THIS_VOLATILE, as a cast
             may make a volatile object later.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
              && flag_strict_volatile_bitfields <= 0)
            {
              enum machine_mode xmode
                = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
              unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

              if (xmode != BLKmode
                  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                  && (known_align == 0 || known_align >= xalign))
                {
                  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
                  DECL_MODE (decl) = xmode;
                  DECL_BIT_FIELD (decl) = 0;
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (i.e. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          DECL_ALIGN (decl)
            = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
          DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl,
                     size_as_int);
          else
            warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
                     decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
/* Return the combined bit position for the byte offset OFFSET and the
   bit position BITPOS.

   These functions operate on byte and bit positions present in FIELD_DECLs
   and assume that these expressions result in no (intermediate) overflow.
   This assumption is necessary to fold the expressions as much as possible,
   so as to avoid creating artificially variable-sized types in languages
   supporting variable-sized types like Ada.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  if (TREE_CODE (offset) == PLUS_EXPR)
    offset = size_binop (PLUS_EXPR,
                         fold_convert (bitsizetype, TREE_OPERAND (offset, 0)),
                         fold_convert (bitsizetype, TREE_OPERAND (offset, 1)));
  else
    offset = fold_convert (bitsizetype, offset);
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR, offset, bitsize_unit_node));
}

/* Return the combined truncated byte position for the byte offset OFFSET and
   the bit position BITPOS.  */

tree
byte_from_pos (tree offset, tree bitpos)
{
  tree bytepos;
  if (TREE_CODE (bitpos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    bytepos = TREE_OPERAND (bitpos, 0);
  else
    bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
  return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}
/* Split the bit position POS into a byte offset *POFFSET and a bit
   position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  tree toff_align = bitsize_int (off_align);
  if (TREE_CODE (pos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype, TREE_OPERAND (pos, 0)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = bitsize_zero_node;
    }
  else
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype,
                                           size_binop (FLOOR_DIV_EXPR, pos,
                                                       toff_align)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    }
}

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree offset, bitpos;
      pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
      *poffset = size_binop (PLUS_EXPR, *poffset, offset);
      *pbitpos = bitpos;
    }
}
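/* A worked example (not from the original source): with off_align == 8,
   normalizing *poffset == 2 (bytes) and *pbitpos == 11 first splits 11
   into 1 byte plus 3 bits via pos_from_bit, then folds the byte into the
   offset, leaving *poffset == 3 and *pbitpos == 3.  Conversely,
   bit_from_pos (3, 3) recombines these into bit position 3 * 8 + 3 == 27.  */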
/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only one that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n",
             rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!vec_safe_is_empty (rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug_vec_tree (rli->pending_statics);
    }
}
/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}
/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
          || ((DECL_SIZE (field) == NULL_TREE
               || !integer_zerop (DECL_SIZE (field)))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}
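/* Illustrative only: for "union { char c; double d; }" each field is
   placed at offset zero, and the union's tentative size is the MAX_EXPR
   of the field sizes, i.e. 8 bytes on a target with an 8-byte double.  */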
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
             / align));
}
#endif
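/* A worked example (not from the original source): for a 28-bit field of
   a 32-bit type at byte_offset 0, bit_offset 8 and align 32, the field
   would occupy bits 8..35, i.e. (8 + 28 + 31) / 32 == 2 alignment units,
   while the type itself spans 32 / 32 == 1, so the function returns
   nonzero and the caller advances to the next boundary.  */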
/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      vec_safe_push (rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
         maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
                   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
                   * (tree_low_cst (rli->offset, 1)
                      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
        {
          if (TYPE_ALIGN (type) > desired_align)
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wattributes, "packed attribute causes "
                         "inefficient alignment for %q+D", field);
              /* Don't warn if DECL_PACKED was set by the type.  */
              else if (!TYPE_PACKED (rli->t))
                warning (OPT_Wattributes, "packed attribute is "
                         "unnecessary for %q+D", field);
            }
        }
      else
        rli->packed_maybe_necessary = 1;
    }
  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align)
    {
      /* No, we need to skip space before this field.
         Bump the cumulative size to multiple of field alignment.  */

      if (!targetm.ms_bitfield_layout_p (rli->t)
          && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
        warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
         the bit position.  */
      if (desired_align < rli->offset_align)
        rli->bitpos = round_up (rli->bitpos, desired_align);
      else
        {
          /* First adjust OFFSET by the partial bits, then align.  */
          rli->offset
            = size_binop (PLUS_EXPR, rli->offset,
                          fold_convert (sizetype,
                                        size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                    bitsize_unit_node)));
          rli->bitpos = bitsize_zero_node;

          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
        }

      if (! TREE_CONSTANT (rli->offset))
        rli->offset_align = desired_align;
      if (targetm.ms_bitfield_layout_p (rli->t))
        rli->prev_field = NULL;
    }
  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
          /* Enter for these packed fields only to issue a warning.  */
          || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
         than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        {
          if (DECL_PACKED (field))
            {
              if (warn_packed_bitfield_compat == 1)
                inform
                  (input_location,
                   "offset of packed bit-field %qD has changed in GCC 4.4",
                   field);
            }
          else
            rli->bitpos = round_up (rli->bitpos, type_align);
        }

      if (! DECL_PACKED (field))
        TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
        type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
         statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
        type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
         Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif
  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
        When a bit field is inserted into a packed record, the whole
        size of the underlying type is used by one or more same-size
        adjacent bitfields.  (That is, if it's long:3, 32 bits is
        used in the record, and any additional adjacent long bitfields are
        packed into the same chunk of 32 bits.  However, if the size
        changes, a new field of that size is allocated.)  In an unpacked
        record, this is the same as using alignment, but not equivalent
        when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
        {
          /* If both are bitfields, nonzero, and the same size, this is
             the middle of a run.  Zero declared size fields are special
             and handled as "end of run".  (Note: it's nonzero declared
             size, but equal type sizes!)  (Since we know that both
             the current and previous fields are bitfields by the
             time we check it, DECL_SIZE must be present for both.)  */
          if (DECL_BIT_FIELD_TYPE (field)
              && !integer_zerop (DECL_SIZE (field))
              && !integer_zerop (DECL_SIZE (rli->prev_field))
              && host_integerp (DECL_SIZE (rli->prev_field), 0)
              && host_integerp (TYPE_SIZE (type), 0)
              && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
            {
              /* We're in the middle of a run of equal type size fields; make
                 sure we realign if we run out of bits.  (Not decl size,
                 type size!)  */
              HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

              if (rli->remaining_in_alignment < bitsize)
                {
                  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

                  /* out of bits; bump up to next 'word'.  */
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                  rli->prev_field = field;
                  if (typesize < bitsize)
                    rli->remaining_in_alignment = 0;
                  else
                    rli->remaining_in_alignment = typesize - bitsize;
                }
              else
                rli->remaining_in_alignment -= bitsize;
            }
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
                 size, we have to "use up" the rest of the bits of the type
                 size.

                 Compute the new position as the sum of the size for the prior
                 type and where we first started working on that type.
                 Note: since the beginning of the field was aligned then
                 of course the end will be too.  No round needed.  */

              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
                {
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                }
              else
                /* We "use up" size zero fields; the code below should behave
                   as if the prior field was not a bitfield.  */
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE (field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          normalize_rli (rli);
        }

      /* If we're starting a new run of same type size bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield the type sizes differ, or if there wasn't
         a prior bitfield the size of the current field is nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
              : !integer_zerop (DECL_SIZE (field))))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* (When not a bitfield), we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again) we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
              && host_integerp (DECL_SIZE (field), 1))
            {
              unsigned HOST_WIDE_INT bitsize
                = tree_low_cst (DECL_SIZE (field), 1);
              unsigned HOST_WIDE_INT typesize
                = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

              if (typesize < bitsize)
                rli->remaining_in_alignment = 0;
              else
                rli->remaining_in_alignment = typesize - bitsize;
            }

          /* Now align (conventionally) for the new type.  */
          type_align = TYPE_ALIGN (TREE_TYPE (field));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up (rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
                    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
           || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset,
                      fold_convert (sizetype,
                                    size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                bitsize_unit_node)));
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
         pad the struct out to the full length of the last type.  */
      if ((DECL_CHAIN (field) == NULL
           || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
          && DECL_BIT_FIELD_TYPE (field)
          && !integer_zerop (DECL_SIZE (field)))
        rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
                                          rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
        = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
        {
          if (TYPE_NAME (rli->t))
            {
              tree name;

              if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
                name = TYPE_NAME (rli->t);
              else
                name = DECL_NAME (TYPE_NAME (rli->t));

              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked, "packed attribute causes inefficient "
                         "alignment for %qE", name);
              else
                warning (OPT_Wpacked,
                         "packed attribute is unnecessary for %qE", name);
            }
          else
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked,
                         "packed attribute causes inefficient alignment");
              else
                warning (OPT_Wpacked, "packed attribute is unnecessary");
            }
        }
    }
}
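/* Illustrative only: for "struct { int i; char c; }" on a target with a
   4-byte, 4-aligned int, the unpadded size of 5 bytes is rounded up to 8
   here, which triggers the -Wpadded size warning above when that option
   is enabled; "struct { char c; int i; }" instead gets its padding
   between the fields, reported earlier from place_field.  */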
/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
          || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
              && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
              && !(TYPE_SIZE (TREE_TYPE (field)) != 0
                   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
          || ! host_integerp (bit_position (field), 1)
          || DECL_SIZE (field) == 0
          || ! host_integerp (DECL_SIZE (field), 1))
        return;

      /* If this field is the whole struct, remember its mode so
         that, say, we can put a double in a class into a DF
         register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
        mode = DECL_MODE (field);

      /* With some targets, it is sub-optimal to access an aligned
         BLKmode structure as a scalar.  */
      if (targetm.member_type_forces_blk (field, mode))
        return;
    }

  /* If we only have one real field, use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    SET_TYPE_MODE (type, mode);
  else
    SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
            || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
         don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      SET_TYPE_MODE (type, BLKmode);
    }
}
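/* Illustrative only: "struct { double d; }" typically gets DFmode here,
   because the single field spans the whole struct, so such a struct can
   live in a floating-point register; "struct { char a[3]; }" has no
   24-bit integer mode on most targets and stays BLKmode.  */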
1655 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1656 out. */
1658 static void
1659 finalize_type_size (tree type)
1661 /* Normally, use the alignment corresponding to the mode chosen.
1662 However, where strict alignment is not required, avoid
1663 over-aligning structures, since most compilers do not do this
1664 alignment. */
1666 if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1667 && (STRICT_ALIGNMENT
1668 || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1669 && TREE_CODE (type) != QUAL_UNION_TYPE
1670 && TREE_CODE (type) != ARRAY_TYPE)))
1672 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1674 /* Don't override a larger alignment requirement coming from a user
1675 alignment of one of the fields. */
1676 if (mode_align >= TYPE_ALIGN (type))
1678 TYPE_ALIGN (type) = mode_align;
1679 TYPE_USER_ALIGN (type) = 0;
1683 /* Do machine-dependent extra alignment. */
1684 #ifdef ROUND_TYPE_ALIGN
1685 TYPE_ALIGN (type)
1686 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1687 #endif
1689 /* If we failed to find a simple way to calculate the unit size
1690 of the type, find it by division. */
1691 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1692 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1693 result will fit in sizetype. We will get more efficient code using
1694 sizetype, so we force a conversion. */
1695 TYPE_SIZE_UNIT (type)
1696 = fold_convert (sizetype,
1697 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1698 bitsize_unit_node));
1700 if (TYPE_SIZE (type) != 0)
1702 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1703 TYPE_SIZE_UNIT (type)
1704 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
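/* A worked example, assuming BITS_PER_UNIT == 8: a record laid out to
   72 bits with TYPE_ALIGN of 32 has TYPE_SIZE rounded up to 96 bits
   and TYPE_SIZE_UNIT to 96 / 8 = 12 bytes; the division above would
   likewise have yielded 72 / 8 = 9 bytes before rounding.  */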
1707 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1708 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1709 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1710 if (TYPE_SIZE_UNIT (type) != 0
1711 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1712 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1714 /* Also lay out any other variants of the type. */
1715 if (TYPE_NEXT_VARIANT (type)
1716 || type != TYPE_MAIN_VARIANT (type))
1718 tree variant;
1719 /* Record layout info of this variant. */
1720 tree size = TYPE_SIZE (type);
1721 tree size_unit = TYPE_SIZE_UNIT (type);
1722 unsigned int align = TYPE_ALIGN (type);
1723 unsigned int user_align = TYPE_USER_ALIGN (type);
1724 enum machine_mode mode = TYPE_MODE (type);
1726 /* Copy it into all variants. */
1727 for (variant = TYPE_MAIN_VARIANT (type);
1728 variant != 0;
1729 variant = TYPE_NEXT_VARIANT (variant))
1731 TYPE_SIZE (variant) = size;
1732 TYPE_SIZE_UNIT (variant) = size_unit;
1733 TYPE_ALIGN (variant) = align;
1734 TYPE_USER_ALIGN (variant) = user_align;
1735 SET_TYPE_MODE (variant, mode);
1740 /* Return a new underlying object for a bitfield started with FIELD. */
1742 static tree
1743 start_bitfield_representative (tree field)
1745 tree repr = make_node (FIELD_DECL);
1746 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
1747 /* Force the representative to begin at a BITS_PER_UNIT aligned
1748 boundary - C++ may use tail-padding of a base object to
1749 continue packing bits so the bitfield region does not start
1750 at bit zero (see g++.dg/abi/bitfield5.C for example).
1751 Unallocated bits may happen for other reasons as well,
1752 for example Ada which allows explicit bit-granular structure layout. */
1753 DECL_FIELD_BIT_OFFSET (repr)
1754 = size_binop (BIT_AND_EXPR,
1755 DECL_FIELD_BIT_OFFSET (field),
1756 bitsize_int (~(BITS_PER_UNIT - 1)));
1757 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
1758 DECL_SIZE (repr) = DECL_SIZE (field);
1759 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
1760 DECL_PACKED (repr) = DECL_PACKED (field);
1761 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
1762 return repr;
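/* For instance, assuming BITS_PER_UNIT == 8, a bitfield whose
   DECL_FIELD_BIT_OFFSET is 19 gives 19 & ~7 == 16 above, so the
   representative starts at the preceding byte boundary and also
   covers the three already-allocated bits [16, 18].  */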
1765 /* Finish up a bitfield group that was started by creating the underlying
1766 object REPR with the last field in the bitfield group FIELD. */
1768 static void
1769 finish_bitfield_representative (tree repr, tree field)
1771 unsigned HOST_WIDE_INT bitsize, maxbitsize;
1772 enum machine_mode mode;
1773 tree nextf, size;
1775 size = size_diffop (DECL_FIELD_OFFSET (field),
1776 DECL_FIELD_OFFSET (repr));
1777 gcc_assert (host_integerp (size, 1));
1778 bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT
1779 + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
1780 - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)
1781 + tree_low_cst (DECL_SIZE (field), 1));
1783 /* Round up bitsize to multiples of BITS_PER_UNIT. */
1784 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
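  /* A worked example, assuming BITS_PER_UNIT == 8: if FIELD starts two
     bytes after REPR (size == 2), at bit 3 within its byte while REPR
     starts at bit 0, and is 7 bits wide, then
     bitsize == 2 * 8 + 3 - 0 + 7 == 26, rounded up to 32.  */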
1786 /* Now nothing tells us how to pad out bitsize ... */
1787 nextf = DECL_CHAIN (field);
1788 while (nextf && TREE_CODE (nextf) != FIELD_DECL)
1789 nextf = DECL_CHAIN (nextf);
1790 if (nextf)
1792 tree maxsize;
1793 /* If there was an error, the field may not be laid out
1794 correctly. Don't bother to do anything. */
1795 if (TREE_TYPE (nextf) == error_mark_node)
1796 return;
1797 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
1798 DECL_FIELD_OFFSET (repr));
1799 if (host_integerp (maxsize, 1))
1801 maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
1802 + tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1)
1803 - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
1804 /* If the group ends within a bitfield, nextf does not need to be
1805 aligned to BITS_PER_UNIT. Thus round up. */
1806 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1808 else
1809 maxbitsize = bitsize;
1811 else
1813 /* ??? If you consider that tail-padding of this struct might be
1814 re-used when deriving from it, we cannot really do the following
1815 and thus need to set maxsize to bitsize? Also, we cannot
1816 generally rely on maxsize to fold to an integer constant, so
1817 use bitsize as a fallback for this case. */
1818 tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
1819 DECL_FIELD_OFFSET (repr));
1820 if (host_integerp (maxsize, 1))
1821 maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
1822 - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
1823 else
1824 maxbitsize = bitsize;
1827 /* This only holds because we don't artificially break up the
1828 representative in the middle of a large bitfield into different,
1829 possibly overlapping representatives, and because all
1830 representatives start at a byte offset. */
1831 gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
1833 /* Find the smallest nice mode to use. */
1834 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1835 mode = GET_MODE_WIDER_MODE (mode))
1836 if (GET_MODE_BITSIZE (mode) >= bitsize)
1837 break;
1838 if (mode != VOIDmode
1839 && (GET_MODE_BITSIZE (mode) > maxbitsize
1840 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
1841 mode = VOIDmode;
1843 if (mode == VOIDmode)
1845 /* We really want a BLKmode representative only as a last resort,
1846 considering the member b in
1847 struct { int a : 7; int b : 17; int c; } __attribute__((packed));
1848 Otherwise we simply want to split the representative up
1849 allowing for overlaps within the bitfield region as required for
1850 struct { int a : 7; int b : 7;
1851 int c : 10; int d; } __attribute__((packed));
1852 [0, 15] HImode for a and b, [8, 23] HImode for c. */
1853 DECL_SIZE (repr) = bitsize_int (bitsize);
1854 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
1855 DECL_MODE (repr) = BLKmode;
1856 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
1857 bitsize / BITS_PER_UNIT);
1859 else
1861 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
1862 DECL_SIZE (repr) = bitsize_int (modesize);
1863 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
1864 DECL_MODE (repr) = mode;
1865 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
1868 /* Remember whether the bitfield group is at the end of the
1869 structure or not. */
1870 DECL_CHAIN (repr) = nextf;
1873 /* Compute and set FIELD_DECLs for the underlying objects we should
1874 use for bitfield access for the structure laid out with RLI. */
1876 static void
1877 finish_bitfield_layout (record_layout_info rli)
1879 tree field, prev;
1880 tree repr = NULL_TREE;
1882 /* Unions would be special: for the ease of type-punning optimizations
1883 we could use the underlying type as a hint for the representative
1884 if the bitfield would fit and the representative would not exceed
1885 the union in size. */
1886 if (TREE_CODE (rli->t) != RECORD_TYPE)
1887 return;
1889 for (prev = NULL_TREE, field = TYPE_FIELDS (rli->t);
1890 field; field = DECL_CHAIN (field))
1892 if (TREE_CODE (field) != FIELD_DECL)
1893 continue;
1895 /* In the C++ memory model, consecutive bit fields in a structure are
1896 considered one memory location and updating a memory location
1897 may not store into adjacent memory locations. */
1898 if (!repr
1899 && DECL_BIT_FIELD_TYPE (field))
1901 /* Start new representative. */
1902 repr = start_bitfield_representative (field);
1904 else if (repr
1905 && ! DECL_BIT_FIELD_TYPE (field))
1907 /* Finish off new representative. */
1908 finish_bitfield_representative (repr, prev);
1909 repr = NULL_TREE;
1911 else if (DECL_BIT_FIELD_TYPE (field))
1913 gcc_assert (repr != NULL_TREE);
1915 /* Zero-size bitfields finish off a representative and
1916 do not have a representative themselves. This is
1917 required by the C++ memory model. */
1918 if (integer_zerop (DECL_SIZE (field)))
1920 finish_bitfield_representative (repr, prev);
1921 repr = NULL_TREE;
1924 /* We assume that the DECL_FIELD_OFFSETs of the representative
1925 and of each bitfield member are either both constants or equal.
1926 This is because we need to be able to compute the bit-offset
1927 of each field relative to the representative in get_bit_range
1928 during RTL expansion.
1929 If these constraints are not met, simply force a new
1930 representative to be generated. That will at most
1931 generate worse code but still maintain correctness with
1932 respect to the C++ memory model. */
1933 else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1)
1934 && host_integerp (DECL_FIELD_OFFSET (field), 1))
1935 || operand_equal_p (DECL_FIELD_OFFSET (repr),
1936 DECL_FIELD_OFFSET (field), 0)))
1938 finish_bitfield_representative (repr, prev);
1939 repr = start_bitfield_representative (field);
1942 else
1943 continue;
1945 if (repr)
1946 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
1948 prev = field;
1951 if (repr)
1952 finish_bitfield_representative (repr, prev);
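/* A sketch of the grouping above: in

     struct S { int a : 3; int b : 5; char c; };

   field a starts a representative, b joins it, and the non-bitfield c
   finishes it off, so a and b share one DECL_BIT_FIELD_REPRESENTATIVE
   (a single byte on typical targets) while c has none.  */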
1955 /* Do all of the work required to lay out the type indicated by RLI,
1956 once the fields have been laid out. This function will call `free'
1957 for RLI, unless FREE_P is false. Passing a value other than true
1958 for FREE_P is bad practice; the option to pass false exists only
1959 to support the G++ 3.2 ABI. */
1961 void
1962 finish_record_layout (record_layout_info rli, int free_p)
1964 tree variant;
1966 /* Compute the final size. */
1967 finalize_record_size (rli);
1969 /* Compute the TYPE_MODE for the record. */
1970 compute_record_mode (rli->t);
1972 /* Perform any last tweaks to the TYPE_SIZE, etc. */
1973 finalize_type_size (rli->t);
1975 /* Compute bitfield representatives. */
1976 finish_bitfield_layout (rli);
1978 /* Propagate TYPE_PACKED to variants. With C++ templates,
1979 handle_packed_attribute is too early to do this. */
1980 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
1981 variant = TYPE_NEXT_VARIANT (variant))
1982 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
1984 /* Lay out any static members. This is done now because their type
1985 may use the record's type. */
1986 while (!vec_safe_is_empty (rli->pending_statics))
1987 layout_decl (rli->pending_statics->pop (), 0);
1989 /* Clean up. */
1990 if (free_p)
1992 vec_free (rli->pending_statics);
1993 free (rli);
1998 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
1999 NAME; its fields are chained in reverse order on FIELDS.
2001 If ALIGN_TYPE is non-null, it is given the same alignment as
2002 ALIGN_TYPE. */
2004 void
2005 finish_builtin_struct (tree type, const char *name, tree fields,
2006 tree align_type)
2008 tree tail, next;
2010 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2012 DECL_FIELD_CONTEXT (fields) = type;
2013 next = DECL_CHAIN (fields);
2014 DECL_CHAIN (fields) = tail;
2016 TYPE_FIELDS (type) = tail;
2018 if (align_type)
2020 TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
2021 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2024 layout_type (type);
2025 #if 0 /* not yet, should get fixed properly later */
2026 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2027 #else
2028 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2029 TYPE_DECL, get_identifier (name), type);
2030 #endif
2031 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2032 layout_decl (TYPE_NAME (type), 0);
2035 /* Calculate the mode, size, and alignment for TYPE.
2036 For an array type, calculate the element separation as well.
2037 Record TYPE on the chain of permanent or temporary types
2038 so that dbxout will find out about it.
2040 TYPE_SIZE of a type is nonzero if the type has been laid out already.
2041 layout_type does nothing on such a type.
2043 If the type is incomplete, its TYPE_SIZE remains zero. */
2045 void
2046 layout_type (tree type)
2048 gcc_assert (type);
2050 if (type == error_mark_node)
2051 return;
2053 /* Do nothing if type has been laid out before. */
2054 if (TYPE_SIZE (type))
2055 return;
2057 switch (TREE_CODE (type))
2059 case LANG_TYPE:
2060 /* This kind of type is the responsibility
2061 of the language-specific code. */
2062 gcc_unreachable ();
2064 case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
2065 if (TYPE_PRECISION (type) == 0)
2066 TYPE_PRECISION (type) = 1; /* default to one byte/boolean. */
2068 /* ... fall through ... */
2070 case INTEGER_TYPE:
2071 case ENUMERAL_TYPE:
2072 if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
2073 && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
2074 TYPE_UNSIGNED (type) = 1;
2076 SET_TYPE_MODE (type,
2077 smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
2078 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2079 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2080 break;
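      /* E.g. an enumeration whose precision is 7 typically gets QImode
	 here, so TYPE_SIZE becomes 8 bits and TYPE_SIZE_UNIT 1 byte;
	 the exact mode chosen is target-dependent.  */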
2082 case REAL_TYPE:
2083 SET_TYPE_MODE (type,
2084 mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
2085 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2086 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2087 break;
2089 case FIXED_POINT_TYPE:
2090 /* TYPE_MODE (type) has been set already. */
2091 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2092 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2093 break;
2095 case COMPLEX_TYPE:
2096 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2097 SET_TYPE_MODE (type,
2098 mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
2099 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2100 ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
2101 0));
2102 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2103 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2104 break;
2106 case VECTOR_TYPE:
2108 int nunits = TYPE_VECTOR_SUBPARTS (type);
2109 tree innertype = TREE_TYPE (type);
2111 gcc_assert (!(nunits & (nunits - 1)));
2113 /* Find an appropriate mode for the vector type. */
2114 if (TYPE_MODE (type) == VOIDmode)
2115 SET_TYPE_MODE (type,
2116 mode_for_vector (TYPE_MODE (innertype), nunits));
2118 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2119 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2120 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2121 TYPE_SIZE_UNIT (innertype),
2122 size_int (nunits));
2123 TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
2124 bitsize_int (nunits));
2126 /* For vector types, we do not default to the mode's alignment.
2127 Instead, query a target hook, defaulting to natural alignment.
2128 This prevents ABI changes depending on whether or not native
2129 vector modes are supported. */
2130 TYPE_ALIGN (type) = targetm.vector_alignment (type);
2132 /* However, if the underlying mode requires a bigger alignment than
2133 what the target hook provides, we cannot use the mode. For now,
2134 simply reject that case. */
2135 gcc_assert (TYPE_ALIGN (type)
2136 >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2137 break;
2140 case VOID_TYPE:
2141 /* This is an incomplete type and so doesn't have a size. */
2142 TYPE_ALIGN (type) = 1;
2143 TYPE_USER_ALIGN (type) = 0;
2144 SET_TYPE_MODE (type, VOIDmode);
2145 break;
2147 case OFFSET_TYPE:
2148 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2149 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
2150 /* A pointer might be MODE_PARTIAL_INT,
2151 but ptrdiff_t must be integral. */
2152 SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
2153 TYPE_PRECISION (type) = POINTER_SIZE;
2154 break;
2156 case FUNCTION_TYPE:
2157 case METHOD_TYPE:
2158 /* It's hard to see what the mode and size of a function ought to
2159 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2160 make it consistent with that. */
2161 SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
2162 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2163 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2164 break;
2166 case POINTER_TYPE:
2167 case REFERENCE_TYPE:
2169 enum machine_mode mode = TYPE_MODE (type);
2170 if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
2172 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
2173 mode = targetm.addr_space.address_mode (as);
2176 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2177 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2178 TYPE_UNSIGNED (type) = 1;
2179 TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
2181 break;
2183 case ARRAY_TYPE:
2185 tree index = TYPE_DOMAIN (type);
2186 tree element = TREE_TYPE (type);
2188 build_pointer_type (element);
2190 /* We need to know both bounds in order to compute the size. */
2191 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2192 && TYPE_SIZE (element))
2194 tree ub = TYPE_MAX_VALUE (index);
2195 tree lb = TYPE_MIN_VALUE (index);
2196 tree element_size = TYPE_SIZE (element);
2197 tree length;
2199 /* Make sure that an array of zero-sized element is zero-sized
2200 regardless of its extent. */
2201 if (integer_zerop (element_size))
2202 length = size_zero_node;
2204 /* The computation should happen in the original signedness so
2205 that (possible) negative values are handled appropriately
2206 when determining overflow. */
2207 else
2209 /* ??? When it is obvious that the range is signed,
2210 represent it using ssizetype. */
2211 if (TREE_CODE (lb) == INTEGER_CST
2212 && TREE_CODE (ub) == INTEGER_CST
2213 && TYPE_UNSIGNED (TREE_TYPE (lb))
2214 && tree_int_cst_lt (ub, lb))
2216 unsigned prec = TYPE_PRECISION (TREE_TYPE (lb));
2217 lb = double_int_to_tree
2218 (ssizetype,
2219 tree_to_double_int (lb).sext (prec));
2220 ub = double_int_to_tree
2221 (ssizetype,
2222 tree_to_double_int (ub).sext (prec));
2224 length
2225 = fold_convert (sizetype,
2226 size_binop (PLUS_EXPR,
2227 build_int_cst (TREE_TYPE (lb), 1),
2228 size_binop (MINUS_EXPR, ub, lb)));
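	    /* E.g. for int a[5], with lb == 0 and ub == 4, this computes
	       length = 1 + (4 - 0) = 5; with 32-bit ints, TYPE_SIZE below
	       becomes 32 * 5 = 160 bits and TYPE_SIZE_UNIT 20 bytes.  */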
2231 /* ??? We have no way to distinguish a null-sized array from an
2232 array spanning the whole sizetype range, so we arbitrarily
2233 decide that [0, -1] is the only valid representation. */
2234 if (integer_zerop (length)
2235 && TREE_OVERFLOW (length)
2236 && integer_zerop (lb))
2237 length = size_zero_node;
2239 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2240 fold_convert (bitsizetype,
2241 length));
2243 /* If we know the size of the element, calculate the total size
2244 directly, rather than do some division thing below. This
2245 optimization helps Fortran assumed-size arrays (where the
2246 size of the array is determined at runtime) substantially. */
2247 if (TYPE_SIZE_UNIT (element))
2248 TYPE_SIZE_UNIT (type)
2249 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2252 /* Now round the alignment and size,
2253 using machine-dependent criteria if any. */
2255 #ifdef ROUND_TYPE_ALIGN
2256 TYPE_ALIGN (type)
2257 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
2258 #else
2259 TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
2260 #endif
2261 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2262 SET_TYPE_MODE (type, BLKmode);
2263 if (TYPE_SIZE (type) != 0
2264 && ! targetm.member_type_forces_blk (type, VOIDmode)
2265 /* BLKmode elements force BLKmode aggregate;
2266 else extract/store fields may lose. */
2267 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2268 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2270 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2271 TYPE_SIZE (type)));
2272 if (TYPE_MODE (type) != BLKmode
2273 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2274 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2276 TYPE_NO_FORCE_BLK (type) = 1;
2277 SET_TYPE_MODE (type, BLKmode);
2280 /* When the element size is constant, check that it is at least as
2281 large as the element alignment. */
2282 if (TYPE_SIZE_UNIT (element)
2283 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2284 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2285 TYPE_ALIGN_UNIT. */
2286 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2287 && !integer_zerop (TYPE_SIZE_UNIT (element))
2288 && compare_tree_int (TYPE_SIZE_UNIT (element),
2289 TYPE_ALIGN_UNIT (element)) < 0)
2290 error ("alignment of array elements is greater than element size");
2291 break;
2294 case RECORD_TYPE:
2295 case UNION_TYPE:
2296 case QUAL_UNION_TYPE:
2298 tree field;
2299 record_layout_info rli;
2301 /* Initialize the layout information. */
2302 rli = start_record_layout (type);
2304 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2305 in the reverse order in building the COND_EXPR that denotes
2306 its size. We reverse them again later. */
2307 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2308 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2310 /* Place all the fields. */
2311 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2312 place_field (rli, field);
2314 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2315 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2317 /* Finish laying out the record. */
2318 finish_record_layout (rli, /*free_p=*/true);
2320 break;
2322 default:
2323 gcc_unreachable ();
2326 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2327 records and unions, finish_record_layout already called this
2328 function. */
2329 if (TREE_CODE (type) != RECORD_TYPE
2330 && TREE_CODE (type) != UNION_TYPE
2331 && TREE_CODE (type) != QUAL_UNION_TYPE)
2332 finalize_type_size (type);
2334 /* We should never see alias sets on incomplete aggregates, and
2335 layout_type should not be called on aggregates already laid out. */
2336 if (AGGREGATE_TYPE_P (type))
2337 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2340 /* Vector types need to re-check the target flags each time we report
2341 the machine mode. We need to do this because attribute target can
2342 change the result of vector_mode_supported_p and have_regs_of_mode
2343 on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
2344 change on a per-function basis. */
2345 /* ??? Possibly a better solution is to run through all the types
2346 referenced by a function and re-compute the TYPE_MODE once, rather
2347 than make the TYPE_MODE macro call a function. */
2349 enum machine_mode
2350 vector_type_mode (const_tree t)
2352 enum machine_mode mode;
2354 gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
2356 mode = t->type_common.mode;
2357 if (VECTOR_MODE_P (mode)
2358 && (!targetm.vector_mode_supported_p (mode)
2359 || !have_regs_of_mode[mode]))
2361 enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;
2363 /* For integers, try mapping it to a same-sized scalar mode. */
2364 if (GET_MODE_CLASS (innermode) == MODE_INT)
2366 mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
2367 * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
2369 if (mode != VOIDmode && have_regs_of_mode[mode])
2370 return mode;
2373 return BLKmode;
2376 return mode;
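/* An illustrative, target-dependent sketch: if an attribute target
   switch disables the vector unit, a 4-element vector of 32-bit
   integers whose recorded vector mode is no longer supported is
   remapped above through mode_for_size (4 * 32, MODE_INT, 0) to a
   128-bit integer mode when such registers exist, else to BLKmode.  */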
2379 /* Create and return a type for signed integers of PRECISION bits. */
2381 tree
2382 make_signed_type (int precision)
2384 tree type = make_node (INTEGER_TYPE);
2386 TYPE_PRECISION (type) = precision;
2388 fixup_signed_type (type);
2389 return type;
2392 /* Create and return a type for unsigned integers of PRECISION bits. */
2394 tree
2395 make_unsigned_type (int precision)
2397 tree type = make_node (INTEGER_TYPE);
2399 TYPE_PRECISION (type) = precision;
2401 fixup_unsigned_type (type);
2402 return type;
2405 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2406 and SATP. */
2408 tree
2409 make_fract_type (int precision, int unsignedp, int satp)
2411 tree type = make_node (FIXED_POINT_TYPE);
2413 TYPE_PRECISION (type) = precision;
2415 if (satp)
2416 TYPE_SATURATING (type) = 1;
2418 /* Lay out the type: set its alignment, size, etc. */
2419 if (unsignedp)
2421 TYPE_UNSIGNED (type) = 1;
2422 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
2424 else
2425 SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
2426 layout_type (type);
2428 return type;
2431 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2432 and SATP. */
2434 tree
2435 make_accum_type (int precision, int unsignedp, int satp)
2437 tree type = make_node (FIXED_POINT_TYPE);
2439 TYPE_PRECISION (type) = precision;
2441 if (satp)
2442 TYPE_SATURATING (type) = 1;
2444 /* Lay out the type: set its alignment, size, etc. */
2445 if (unsignedp)
2447 TYPE_UNSIGNED (type) = 1;
2448 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2450 else
2451 SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
2452 layout_type (type);
2454 return type;
2457 /* Initialize sizetypes so layout_type can use them. */
2459 void
2460 initialize_sizetypes (void)
2462 int precision, bprecision;
2464 /* Get sizetypes precision from the SIZE_TYPE target macro. */
2465 if (strcmp (SIZETYPE, "unsigned int") == 0)
2466 precision = INT_TYPE_SIZE;
2467 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2468 precision = LONG_TYPE_SIZE;
2469 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2470 precision = LONG_LONG_TYPE_SIZE;
2471 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2472 precision = SHORT_TYPE_SIZE;
2473 else
2474 gcc_unreachable ();
2476 bprecision
2477 = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
2478 bprecision
2479 = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
2480 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2481 bprecision = HOST_BITS_PER_DOUBLE_INT;
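  /* A worked example, assuming an ILP32 target where SIZETYPE is
     "unsigned int" and BITS_PER_UNIT_LOG is 3: precision = 32, so
     bprecision = MIN (32 + 3 + 1, MAX_FIXED_MODE_SIZE) = 36, which
     smallest_mode_for_size then widens to a 64-bit integer mode;
     bitsizetype must be able to count the bits of any object.  */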
2483 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2484 sizetype = make_node (INTEGER_TYPE);
2485 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2486 TYPE_PRECISION (sizetype) = precision;
2487 TYPE_UNSIGNED (sizetype) = 1;
2488 bitsizetype = make_node (INTEGER_TYPE);
2489 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2490 TYPE_PRECISION (bitsizetype) = bprecision;
2491 TYPE_UNSIGNED (bitsizetype) = 1;
2493 /* Now lay out both types manually. */
2494 SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
2495 TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
2496 TYPE_SIZE (sizetype) = bitsize_int (precision);
2497 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
2498 set_min_and_max_values_for_integral_type (sizetype, precision,
2499 /*is_unsigned=*/true);
2501 SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
2502 TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
2503 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2504 TYPE_SIZE_UNIT (bitsizetype)
2505 = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
2506 set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
2507 /*is_unsigned=*/true);
2509 /* Create the signed variants of *sizetype. */
2510 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2511 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2512 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2513 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2516 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2517 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2518 for TYPE, based on the PRECISION and whether or not the TYPE
2519 IS_UNSIGNED. PRECISION need not correspond to a width supported
2520 natively by the hardware; for example, on a machine with 8-bit,
2521 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2522 61. */
2524 void
2525 set_min_and_max_values_for_integral_type (tree type,
2526 int precision,
2527 bool is_unsigned)
2529 tree min_value;
2530 tree max_value;
2532 if (is_unsigned)
2534 min_value = build_int_cst (type, 0);
2535 max_value
2536 = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
2537 ? -1
2538 : ((HOST_WIDE_INT) 1 << precision) - 1,
2539 precision - HOST_BITS_PER_WIDE_INT > 0
2540 ? ((unsigned HOST_WIDE_INT) ~0
2541 >> (HOST_BITS_PER_WIDE_INT
2542 - (precision - HOST_BITS_PER_WIDE_INT)))
2543 : 0);
2545 else
2547 min_value
2548 = build_int_cst_wide (type,
2549 (precision - HOST_BITS_PER_WIDE_INT > 0
2550 ? 0
2551 : (HOST_WIDE_INT) (-1) << (precision - 1)),
2552 (((HOST_WIDE_INT) (-1)
2553 << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2554 ? precision - HOST_BITS_PER_WIDE_INT - 1
2555 : 0))));
2556 max_value
2557 = build_int_cst_wide (type,
2558 (precision - HOST_BITS_PER_WIDE_INT > 0
2559 ? -1
2560 : (HOST_WIDE_INT)
2561 (((unsigned HOST_WIDE_INT) 1
2562 << (precision - 1)) - 1)),
2563 (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2564 ? (HOST_WIDE_INT)
2565 ((((unsigned HOST_WIDE_INT) 1
2566 << (precision - HOST_BITS_PER_WIDE_INT
2567 - 1))) - 1)
2568 : 0));
2571 TYPE_MIN_VALUE (type) = min_value;
2572 TYPE_MAX_VALUE (type) = max_value;
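/* For example, with PRECISION == 7 the values computed above are
   [0, 127] when IS_UNSIGNED and [-64, 63] when signed, independently
   of the width of the mode eventually chosen for the type.  */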
2575 /* Set the extreme values of TYPE based on its precision in bits,
2576 then lay it out. Used when make_signed_type won't do
2577 because the tree code is not INTEGER_TYPE.
2578 E.g. for Pascal, when the -fsigned-char option is given. */
2580 void
2581 fixup_signed_type (tree type)
2583 int precision = TYPE_PRECISION (type);
2585 /* We cannot properly represent constants wider than
2586 HOST_BITS_PER_DOUBLE_INT bits, but we still need the types,
2587 as they are used by the i386 vector extensions and friends. */
2588 if (precision > HOST_BITS_PER_DOUBLE_INT)
2589 precision = HOST_BITS_PER_DOUBLE_INT;
2591 set_min_and_max_values_for_integral_type (type, precision,
2592 /*is_unsigned=*/false);
2594 /* Lay out the type: set its alignment, size, etc. */
2595 layout_type (type);
2598 /* Set the extreme values of TYPE based on its precision in bits,
2599 then lay it out. This is used both in `make_unsigned_type'
2600 and for enumeral types. */
2602 void
2603 fixup_unsigned_type (tree type)
2605 int precision = TYPE_PRECISION (type);
2607 /* We cannot properly represent constants wider than
2608 HOST_BITS_PER_DOUBLE_INT bits, but we still need the types,
2609 as they are used by the i386 vector extensions and friends. */
2610 if (precision > HOST_BITS_PER_DOUBLE_INT)
2611 precision = HOST_BITS_PER_DOUBLE_INT;
2613 TYPE_UNSIGNED (type) = 1;
2615 set_min_and_max_values_for_integral_type (type, precision,
2616 /*is_unsigned=*/true);
2618 /* Lay out the type: set its alignment, size, etc. */
2619 layout_type (type);
2622 /* Construct an iterator for a bitfield that spans BITSIZE bits,
2623 starting at BITPOS.
2625 BITREGION_START is the bit position of the first bit in this
2626 sequence of bit fields. BITREGION_END is the last bit in this
2627 sequence. If these two fields are non-zero, we should restrict the
2628 memory access to that range. Otherwise, we are allowed to touch
2629 any adjacent non bit-fields.
2631 ALIGN is the alignment of the underlying object in bits.
2632 VOLATILEP says whether the bitfield is volatile. */
2634 bit_field_mode_iterator
2635 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
2636 HOST_WIDE_INT bitregion_start,
2637 HOST_WIDE_INT bitregion_end,
2638 unsigned int align, bool volatilep)
2639 : mode_ (GET_CLASS_NARROWEST_MODE (MODE_INT)), bitsize_ (bitsize),
2640 bitpos_ (bitpos), bitregion_start_ (bitregion_start),
2641 bitregion_end_ (bitregion_end), align_ (align),
2642 volatilep_ (volatilep), count_ (0)
2644 if (!bitregion_end_)
2646 /* We can assume that any aligned chunk of ALIGN bits that overlaps
2647 the bitfield is mapped and won't trap, provided that ALIGN isn't
2648 too large. The cap is the biggest required alignment for data,
2649 or at least the word size. And force at least one such chunk. */
2650 unsigned HOST_WIDE_INT units
2651 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
2652 if (bitsize <= 0)
2653 bitsize = 1;
2654 bitregion_end_ = bitpos + bitsize + units - 1;
2655 bitregion_end_ -= bitregion_end_ % units + 1;
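      /* A worked example, assuming units == 32: for a 5-bit field at
	 bitpos 10, bitregion_end_ = 10 + 5 + 32 - 1 = 46, and then
	 46 - (46 % 32 + 1) = 31, i.e. the region ends at the last bit
	 of the first aligned 32-bit chunk.  */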
2659 /* Calls to this function return successively larger modes that can be used
2660 to represent the bitfield. Return true if another bitfield mode is
2661 available, storing it in *OUT_MODE if so. */
2663 bool
2664 bit_field_mode_iterator::next_mode (enum machine_mode *out_mode)
2666 for (; mode_ != VOIDmode; mode_ = GET_MODE_WIDER_MODE (mode_))
2668 unsigned int unit = GET_MODE_BITSIZE (mode_);
2670 /* Skip modes that don't have full precision. */
2671 if (unit != GET_MODE_PRECISION (mode_))
2672 continue;
2674 /* Stop if the mode is too wide to handle efficiently. */
2675 if (unit > MAX_FIXED_MODE_SIZE)
2676 break;
2678 /* Don't deliver more than one multiword mode; the smallest one
2679 should be used. */
2680 if (count_ > 0 && unit > BITS_PER_WORD)
2681 break;
2683 /* Skip modes that are too small. */
2684 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) bitpos_ % unit;
2685 unsigned HOST_WIDE_INT subend = substart + bitsize_;
2686 if (subend > unit)
2687 continue;
2689 /* Stop if the mode goes outside the bitregion. */
2690 HOST_WIDE_INT start = bitpos_ - substart;
2691 if (bitregion_start_ && start < bitregion_start_)
2692 break;
2693 HOST_WIDE_INT end = start + unit;
2694 if (end > bitregion_end_ + 1)
2695 break;
2697 /* Stop if the mode requires too much alignment. */
2698 if (GET_MODE_ALIGNMENT (mode_) > align_
2699 && SLOW_UNALIGNED_ACCESS (mode_, align_))
2700 break;
2702 *out_mode = mode_;
2703 mode_ = GET_MODE_WIDER_MODE (mode_);
2704 count_++;
2705 return true;
2707 return false;
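/* A sketch of a typical iteration, assuming 8-bit units and no
   bitregion or alignment restrictions: for bitsize_ == 9 at
   bitpos_ == 3, QImode is skipped (bits [3, 11] do not fit in one
   unit), so the first mode returned is HImode, whose chunk [0, 15]
   covers the field; SImode and wider may follow on later calls,
   subject to the word-size and MAX_FIXED_MODE_SIZE limits above.  */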
2710 /* Return true if smaller modes are generally preferred for this kind
2711 of bitfield. */
2713 bool
2714 bit_field_mode_iterator::prefer_smaller_modes ()
2716 return (volatilep_
2717 ? targetm.narrow_volatile_bitfield ()
2718 : !SLOW_BYTE_ACCESS);
2721 /* Find the best machine mode to use when referencing a bit field of length
2722 BITSIZE bits starting at BITPOS.
2724 BITREGION_START is the bit position of the first bit in this
2725 sequence of bit fields. BITREGION_END is the last bit in this
2726 sequence. If these two fields are non-zero, we should restrict the
2727 memory access to that range. Otherwise, we are allowed to touch
2728 any adjacent non bit-fields.
2730 The underlying object is known to be aligned to a boundary of ALIGN bits.
2731 If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2732 larger than LARGEST_MODE (usually SImode).
2734 If no mode meets all these conditions, we return VOIDmode.
2736 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2737 smallest mode meeting these conditions.
2739 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2740 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2741 all the conditions.
2743 If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
2744 decide which of the above modes should be used. */
2746 enum machine_mode
2747 get_best_mode (int bitsize, int bitpos,
2748 unsigned HOST_WIDE_INT bitregion_start,
2749 unsigned HOST_WIDE_INT bitregion_end,
2750 unsigned int align,
2751 enum machine_mode largest_mode, bool volatilep)
2753 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
2754 bitregion_end, align, volatilep);
2755 enum machine_mode widest_mode = VOIDmode;
2756 enum machine_mode mode;
2757 while (iter.next_mode (&mode)
2758 /* ??? For historical reasons, reject modes that would normally
2759 receive greater alignment, even if unaligned accesses are
2760 acceptable. This has both advantages and disadvantages.
2761 Removing this check means that something like:
2763 struct s { unsigned int x; unsigned int y; };
2764 int f (struct s *s) { return s->x == 0 && s->y == 0; }
2766 can be implemented using a single load and compare on
2767 64-bit machines that have no alignment restrictions.
2768 For example, on powerpc64-linux-gnu, we would generate:
2770 ld 3,0(3)
2771 cntlzd 3,3
2772 srdi 3,3,6
2775 rather than:
2777 lwz 9,0(3)
2778 cmpwi 7,9,0
2779 bne 7,.L3
2780 lwz 3,4(3)
2781 cntlzw 3,3
2782 srwi 3,3,5
2783 extsw 3,3
2785 .p2align 4,,15
2786 .L3:
2787 li 3,0
2790 However, accessing more than one field can make life harder
2791 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c
2792 has a series of unsigned short copies followed by a series of
2793 unsigned short comparisons. With this check, both the copies
2794 and comparisons remain 16-bit accesses and FRE is able
2795 to eliminate the latter. Without the check, the comparisons
2796 can be done using 2 64-bit operations, which FRE isn't able
2797 to handle in the same way.
2799 Either way, it would probably be worth disabling this check
2800 during expand. One particular example where removing the
2801 check would help is the get_best_mode call in store_bit_field.
2802 If we are given a memory bitregion of 128 bits that is aligned
2803 to a 64-bit boundary, and the bitfield we want to modify is
2804 in the second half of the bitregion, this check causes
2805 store_bit_field to turn the memory into a 64-bit reference
2806 to the _first_ half of the region. We later use
2807 adjust_bitfield_address to get a reference to the correct half,
2808 but doing so looks to adjust_bitfield_address as though we are
2809 moving past the end of the original object, so it drops the
2810 associated MEM_EXPR and MEM_OFFSET. Removing the check
2811 causes store_bit_field to keep a 128-bit memory reference,
2812 so that the final bitfield reference still has a MEM_EXPR
2813 and MEM_OFFSET. */
2814 && GET_MODE_ALIGNMENT (mode) <= align
2815 && (largest_mode == VOIDmode
2816 || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode)))
2818 widest_mode = mode;
2819 if (iter.prefer_smaller_modes ())
2820 break;
2822 return widest_mode;
2825 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
2826 SIGN). The returned constants are made to be usable in TARGET_MODE. */
2828 void
2829 get_mode_bounds (enum machine_mode mode, int sign,
2830 enum machine_mode target_mode,
2831 rtx *mmin, rtx *mmax)
2833 unsigned size = GET_MODE_BITSIZE (mode);
2834 unsigned HOST_WIDE_INT min_val, max_val;
2836 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2838 if (sign)
2840 min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2841 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2843 else
2845 min_val = 0;
2846 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2849 *mmin = gen_int_mode (min_val, target_mode);
2850 *mmax = gen_int_mode (max_val, target_mode);
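/* E.g. get_mode_bounds (QImode, 1, SImode, &mmin, &mmax) stores
   SImode constants for -128 and 127; with SIGN == 0 it would
   store 0 and 255 instead.  */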
2853 #include "gt-stor-layout.h"