/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
#include "cgraph.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "gimple.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.  Set only by
   internal_reference_types called only by a front end.  */
static int reference_types_internal = 0;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
			     HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) VEC(tree,gc) *pending_sizes;

/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Get a VEC of all the objects put on the pending sizes list.  */

VEC(tree,gc) *
get_pending_sizes (void)
{
  VEC(tree,gc) *chain = pending_sizes;

  pending_sizes = 0;
  return chain;
}

/* Add EXPR to the pending sizes list.  */

void
put_pending_size (tree expr)
{
  /* Strip any simple arithmetic from EXPR to see if it has an underlying
     SAVE_EXPR.  */
  expr = skip_simple_arithmetic (expr);

  if (TREE_CODE (expr) == SAVE_EXPR)
    VEC_safe_push (tree, gc, pending_sizes, expr);
}

/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (VEC(tree,gc) *chain)
{
  gcc_assert (!pending_sizes);
  pending_sizes = chain;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  tree save;

  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  */
  if (lang_hooks.decls.global_bindings_p () < 0)
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      if (TREE_CONSTANT (size))
	error ("type size can%'t be explicitly evaluated");
      else
	error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}
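
/* Editorial illustration (added commentary, not in the original sources):
   variable_size matters for things like a C99 variable-length array.  In

       void f (int n)
       {
         char buf[n + 1];
       }

   the TYPE_SIZE of buf's type is an expression in N.  Wrapping it in a
   SAVE_EXPR guarantees that N + 1 is evaluated exactly once at the point
   of declaration, even though layout and code generation consult the
   size expression many times afterwards.  */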

/* An array of functions used for self-referential size computation.  */
static GTY(()) VEC (tree, gc) *size_functions;

/* Look inside EXPR into simple arithmetic operations involving constants.
   Return the outermost non-arithmetic or non-constant node.  */

static tree
skip_simple_constant_arithmetic (tree expr)
{
  while (true)
    {
      if (UNARY_CLASS_P (expr))
	expr = TREE_OPERAND (expr, 0);
      else if (BINARY_CLASS_P (expr))
	{
	  if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
	    expr = TREE_OPERAND (expr, 0);
	  else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
	    expr = TREE_OPERAND (expr, 1);
	  else
	    break;
	}
      else
	break;
    }

  return expr;
}
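
/* Editorial illustration (added commentary, not in the original sources):
   given the tree for "n * 4 + 2", both binary nodes have one constant
   operand, so the loop descends PLUS_EXPR -> MULT_EXPR and returns the
   VAR_DECL for "n".  A node that is neither unary nor binary class, such
   as a SAVE_EXPR or CALL_EXPR met on the way down, is returned as-is.  */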

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
	   && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      for (inner = TREE_OPERAND (*tp, 0);
	   REFERENCE_CLASS_P (inner);
	   inner = TREE_OPERAND (inner, 0))
	;

      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
	{
	  *walk_subtrees = 0;
	  return NULL_TREE;
	}
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  VEC (tree, heap) *self_refs = NULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  VEC(tree,gc) *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR)
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (VEC_length (tree, self_refs) > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
  FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
	{
	  /* We shouldn't have true variables here.  */
	  gcc_assert (TREE_READONLY (ref));
	  subst = ref;
	}
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
	subst = ref;
      /* Default case: the component reference.  */
      else
	subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
	= build_decl (input_location, PARM_DECL, param_name, param_type);
      if (targetm.calls.promote_prototypes (NULL_TREE)
	  && INTEGRAL_TYPE_P (param_type)
	  && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
	DECL_ARG_TYPE (param_decl) = integer_type_node;
      else
	DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      VEC_quick_push (tree, args, ref);
    }

  VEC_free (tree, heap, self_refs);

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  VEC_safe_push (tree, gc, size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (input_location, fndecl, args);
}
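
/* Editorial sketch (added commentary, not in the original sources): a
   self-referential size typically comes from an Ada-like discriminated
   record, where TYPE_SIZE mentions a field of the object being laid out
   via <PLACEHOLDER_EXPR>.n.  The code above factors the size expression
   into an artificial const function, roughly

       SZ0 (p0) := original size expression with p0 for the placeholder

   and rewrites the size as the call SZ0 (<PLACEHOLDER_EXPR>.n), so each
   self-reference becomes an ordinary, explicitly evaluated argument.  */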

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
    {
      dump_function (TDI_original, fndecl);
      gimplify_function_tree (fndecl);
      dump_function (TDI_generic, fndecl);
      cgraph_finalize_function (fndecl, false);
    }

  VEC_free (tree, gc, size_functions);
}

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class MCLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes of wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
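
/* Editorial example (added commentary, not in the original sources;
   target-dependent): on a typical 32-bit target,
   mode_for_size (32, MODE_INT, 0) returns SImode and
   mode_for_size (64, MODE_INT, 0) returns DImode, while an odd request
   such as 24 bits yields BLKmode because no integer mode there has
   exactly 24 value bits.  */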

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, mclass, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
	break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
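
/* Editorial example (added commentary, not in the original sources;
   target-dependent): on targets where floats are 32 bits,
   int_mode_for_mode (SFmode) returns SImode, which is how bitwise
   manipulation of float data gets routed through an integer register.
   Passing a MODE_CC mode is a caller bug and hits gcc_unreachable.  */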

/* Find a mode that is suitable for representing a vector with
   NUNITS elements of mode INNERMODE.  Returns BLKmode if there
   is no suitable mode.  */

enum machine_mode
mode_for_vector (enum machine_mode innermode, unsigned nunits)
{
  enum machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_NUNITS (mode) == nunits
	&& GET_MODE_INNER (mode) == innermode)
      break;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (mode == VOIDmode
      && GET_MODE_CLASS (innermode) == MODE_INT)
    mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
			  MODE_INT, 0);

  if (mode == VOIDmode
      || (GET_MODE_CLASS (mode) == MODE_INT
	  && !have_regs_of_mode[mode]))
    return BLKmode;

  return mode;
}
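
/* Editorial example (added commentary, not in the original sources;
   target-dependent): mode_for_vector (SImode, 4) returns V4SImode on
   targets that define such a mode (SSE- or NEON-class registers); where
   no vector mode matches, the integer fallback tries a same-sized
   scalar mode such as TImode, and BLKmode results if that mode is not
   available in registers either.  */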

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
	DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
			  size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
					  bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  In essence such bit-fields are not influenced by
	     any packing due to #pragma pack or attribute packed.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
	      packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
#endif
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode,
	     occupying a complete byte or bytes on proper boundary,
	     and not volatile or not -fstrict-volatile-bitfields.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	      && !(TREE_THIS_VOLATILE (decl)
		   && flag_strict_volatile_bitfields > 0))
	    {
	      enum machine_mode xmode
		= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
	      unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

	      if (xmode != BLKmode
		  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
		  && (known_align == 0 || known_align >= xalign))
		{
		  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
		  DECL_MODE (decl) = xmode;
		  DECL_BIT_FIELD (decl) = 0;
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (packed_p && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
	 minimum alignment.  Note that do_type_align may set
	 DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
	  && !old_user_align)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
	{
	  /* Some targets (i.e. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  DECL_ALIGN (decl)
	    = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
	  DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
	}

      if (zero_bitfield)
	mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
	  && compare_tree_int (size, larger_than_size) > 0)
	{
	  int size_as_int = TREE_INT_CST_LOW (size);

	  if (compare_tree_int (size, size_as_int) == 0)
	    warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
	  else
	    warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
		     decl, larger_than_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
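
/* Editorial example (added commentary, not in the original sources;
   target-dependent): given

       struct s { unsigned a : 8; unsigned b : 8; };

   each field has a fixed 8-bit size on a byte boundary, so the bit-field
   code above can clear DECL_BIT_FIELD and give the field an ordinary
   QImode on typical targets; an unaligned 3-bit field would instead keep
   DECL_BIT_FIELD set and stay in BLKmode.  */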

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
	tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = NULL;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
		     size_binop (MULT_EXPR,
				 fold_convert (bitsizetype, offset),
				 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
		     fold_convert (sizetype,
				   size_binop (TRUNC_DIV_EXPR, bitpos,
					       bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
	      tree pos)
{
  *poffset = size_binop (MULT_EXPR,
			 fold_convert (sizetype,
				       size_binop (FLOOR_DIV_EXPR, pos,
						   bitsize_int (off_align))),
			 size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
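
/* Editorial worked example (added commentary, not in the original
   sources), assuming BITS_PER_UNIT == 8: a position of offset == 3 bytes
   and bitpos == 5 bits converts as

       bit_from_pos:   5 + 3 * 8                        == 29 bits
       byte_from_pos:  3 + 5 / 8                        == 3 bytes
       pos_from_bit (off_align == 16, pos == 29):
         offset = (29 / 16) * (16 / 8)                  == 2 bytes
         bitpos = 29 % 16                               == 13 bits

   i.e. the (offset, bitpos) pair is a mixed-radix spelling of one bit
   position, kept as two trees so the byte part can stay in sizetype.  */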

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
				      bitsize_int (off_align));

      *poffset
	= size_binop (PLUS_EXPR, *poffset,
		      size_binop (MULT_EXPR,
				  fold_convert (sizetype, extra_aligns),
				  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
	= size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
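
/* Editorial worked example (added commentary, not in the original
   sources), assuming BITS_PER_UNIT == 8: with off_align == 32, an
   accumulated bitpos of 70 normalizes as extra_aligns = 70 / 32 == 2,
   so the byte offset grows by 2 * (32 / 8) == 8 bytes and bitpos
   becomes 70 % 32 == 6; the absolute bit position is unchanged and
   bitpos is again below off_align.  */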

/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
	   rli->record_align, rli->unpacked_align,
	   rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!VEC_empty (tree, rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug_vec_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
	  || (!integer_zerop (DECL_SIZE (field))
	      ? !DECL_PACKED (field)
	      : (rli->prev_field
		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  type_align = MAX (type_align, desired_align);
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	}
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

	  /* Targets might choose to handle unnamed and hence possibly
	     zero-width bitfields.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
		type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
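
/* Editorial example (added commentary, not in the original sources;
   target-dependent): on a PCC_BITFIELD_TYPE_MATTERS target such as most
   ELF platforms, in

       struct s { char c; int i : 3; };

   the named bit-field "i" raises the record alignment to that of int
   even though only 3 bits are used, so sizeof (struct s) is typically 4
   rather than 1.  */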

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3_loc (input_location, COND_EXPR, sizetype,
				   DECL_QUALIFIER (field),
				   DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
		  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
	  > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
	     / align));
}
#endif
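
/* Editorial worked example (added commentary, not in the original
   sources), assuming BITS_PER_UNIT == 8: a 7-bit field at byte_offset 3,
   bit_offset 6 (absolute bit 30) with a 32-bit type and align == 32
   gives offset % align == 30 and (30 + 7 + 31) / 32 == 2, which exceeds
   the type's own 32 / 32 == 1 allocation unit, so the caller advances to
   the next 32-bit boundary before placing the field.  */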

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      VEC_safe_push (tree, gc, rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
		   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
		   * (tree_low_cst (rli->offset, 1)
		      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;
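
  /* Editorial note (added commentary, not in the original sources): in
     two's complement, A & -A isolates the lowest set bit of A, e.g.
     24 & -24 == 8, so a bit position of 24 only guarantees 8-bit
     alignment regardless of how aligned the enclosing record is.  */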

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
			 "inefficient alignment for %q+D", field);
	      /* Don't warn if DECL_PACKED was set by the type.  */
	      else if (!TYPE_PACKED (rli->t))
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?
     We already align ms_struct fields, so don't re-align them.  */
  if (known_align < desired_align
      && !targetm.ms_bitfield_layout_p (rli->t))
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to multiple of field alignment.  */

      if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
	warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
	  /* Enter for these packed fields only to issue a warning.  */
	  || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	{
	  if (DECL_PACKED (field))
	    {
	      if (warn_packed_bitfield_compat == 1)
		inform
		  (input_location,
		   "Offset of packed bit-field %qD has changed in GCC 4.4",
		   field);
	    }
	  else
	    rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
	}

      if (! DECL_PACKED (field))
	TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if it's long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields are
	packed into the same chunk of 32 bits.  However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
	{
	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run".  (Note: it's nonzero declared
	     size, but equal type sizes!)  (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.)  */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && host_integerp (DECL_SIZE (rli->prev_field), 0)
	      && host_integerp (TYPE_SIZE (type), 0)
	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!)  */
	      HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

	      if (rli->remaining_in_alignment < bitsize)
		{
		  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

		  /* out of bits; bump up to next 'word'.  */
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		  rli->prev_field = field;
		  if (typesize < bitsize)
		    rli->remaining_in_alignment = 0;
		  else
		    rli->remaining_in_alignment = typesize - bitsize;
		}
	      else
		rli->remaining_in_alignment -= bitsize;
	    }
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE (field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  normalize_rli (rli);
	}

      /* If we're starting a new run of same size type bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield and the type sizes differ, or if there
	 wasn't a prior bitfield and the size of the current field is
	 nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
	      : !integer_zerop (DECL_SIZE (field))))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* (When not a bitfield), we could be seeing a flex array (with
	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
	     until we see a bitfield (and come by here again) we just skip
	     calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
	      && host_integerp (DECL_SIZE (field), 1))
	    {
	      unsigned HOST_WIDE_INT bitsize
		= tree_low_cst (DECL_SIZE (field), 1);
	      unsigned HOST_WIDE_INT typesize
		= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

	      if (typesize < bitsize)
		rli->remaining_in_alignment = 0;
	      else
		rli->remaining_in_alignment = typesize - bitsize;
	    }

	  /* Now align (conventionally) for the new type.  */
	  type_align = TYPE_ALIGN (TREE_TYPE (field));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);

	  /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
		    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
		       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
	   || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
	 pad the struct out to the full length of the last type.  */
      if ((DECL_CHAIN (field) == NULL
	   || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
	  && DECL_BIT_FIELD_TYPE (field)
	  && !integer_zerop (DECL_SIZE (field)))
	rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
					  rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up_loc (input_location, unpadded_size,
				     TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up_loc (input_location, unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up_loc (input_location, TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
	{
	  if (TYPE_NAME (rli->t))
	    {
	      tree name;

	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
		name = TYPE_NAME (rli->t);
	      else
		name = DECL_NAME (TYPE_NAME (rli->t));

	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked, "packed attribute causes inefficient "
			 "alignment for %qE", name);
	      else
		warning (OPT_Wpacked,
			 "packed attribute is unnecessary for %qE", name);
	    }
	  else
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked,
			 "packed attribute causes inefficient alignment");
	      else
		warning (OPT_Wpacked, "packed attribute is unnecessary");
	    }
	}
    }
}
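
/* Editorial example (added commentary, not in the original sources;
   target-dependent): for struct { int i; char c; } on a target with
   32-bit ints, the unpadded size is 5 bytes and TYPE_ALIGN is 32 bits,
   so the rounding above yields TYPE_SIZE_UNIT == 8 and -Wpadded reports
   the 3 bytes of tail padding; interior padding, by contrast, is
   diagnosed earlier in place_field.  */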

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
	  || ! host_integerp (bit_position (field), 1)
	  || DECL_SIZE (field) == 0
	  || ! host_integerp (DECL_SIZE (field), 1))
	return;

      /* If this field is the whole struct, remember its mode so
	 that, say, we can put a double in a class into a DF
	 register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
	mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, eg. c4x, it is sub-optimal
	 to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
	return;
#endif /* MEMBER_TYPE_FORCES_BLK  */
    }

  /* If we only have one real field; use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    SET_TYPE_MODE (type, mode);
  else
    SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
	 don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      SET_TYPE_MODE (type, BLKmode);
    }
}
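
/* Editorial example (added commentary, not in the original sources;
   target-dependent): struct { double d; } can get DFmode because the
   single field spans the whole struct, so the wrapper can live in a
   floating-point register; struct { int a; int b; } has no whole-struct
   field, and the mode_for_size_tree fallback typically yields DImode on
   64-bit targets.  */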

/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
          || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
              && TREE_CODE (type) != QUAL_UNION_TYPE
              && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
         alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
        {
          TYPE_ALIGN (type) = mode_align;
          TYPE_USER_ALIGN (type) = 0;
        }
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
                      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
                                  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up_loc (input_location,
                                       TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type) = round_up_loc (input_location,
                                            TYPE_SIZE_UNIT (type),
                                            TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also lay out any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
           variant != 0;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          TYPE_ALIGN (variant) = align;
          TYPE_USER_ALIGN (variant) = user_align;
          SET_TYPE_MODE (variant, mode);
        }
    }
}
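
/* Worked example (illustrative only): for `struct { int i; char c; }' on a
   32-bit target, the fields occupy 5 bytes but TYPE_ALIGN is 32 bits, so
   the round_up_loc calls above yield TYPE_SIZE = 64 bits and
   TYPE_SIZE_UNIT = 8 bytes -- the familiar tail padding of C structs.  */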

/* Do all of the work required to lay out the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than true
   for FREE_P is bad practice; that option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Propagate TYPE_PACKED to variants.  With C++ templates,
     handle_packed_attribute is too early to do this.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (!VEC_empty (tree, rli->pending_statics))
    layout_decl (VEC_pop (tree, rli->pending_statics), 0);

  /* Clean up.  */
  if (free_p)
    {
      VEC_free (tree, gc, rli->pending_statics);
      free (rli);
    }
}

/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, it is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
                       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = DECL_CHAIN (fields);
      DECL_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
                                 TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}
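
/* Usage sketch (illustrative only; the struct and field names below are
   hypothetical, not part of GCC): build a builtin record equivalent to
   `struct { void *ptr; int len; }'.  Note that the field list is passed
   chained in reverse.  */
#if 0
  tree f_ptr = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                           get_identifier ("ptr"), ptr_type_node);
  tree f_len = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                           get_identifier ("len"), integer_type_node);
  tree rec = make_node (RECORD_TYPE);

  DECL_CHAIN (f_len) = f_ptr;   /* Reverse chain: len -> ptr.  */
  finish_builtin_struct (rec, "__example_struct", f_len, NULL_TREE);
  /* TYPE_FIELDS (rec) is now ptr -> len, and rec is fully laid out.  */
#endif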

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
         of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
        TYPE_PRECISION (type) = 1; /* default to a one-bit boolean.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
          && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
        TYPE_UNSIGNED (type) = 1;

      SET_TYPE_MODE (type,
                     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case REAL_TYPE:
      SET_TYPE_MODE (type,
                     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case FIXED_POINT_TYPE:
      /* TYPE_MODE (type) has been set already.  */
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      SET_TYPE_MODE (type,
                     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
                                    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
                                     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
                                    0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
        int nunits = TYPE_VECTOR_SUBPARTS (type);
        tree innertype = TREE_TYPE (type);

        gcc_assert (!(nunits & (nunits - 1)));

        /* Find an appropriate mode for the vector type.  */
        if (TYPE_MODE (type) == VOIDmode)
          SET_TYPE_MODE (type,
                         mode_for_vector (TYPE_MODE (innertype), nunits));

        TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
        TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
                                                 TYPE_SIZE_UNIT (innertype),
                                                 size_int (nunits), 0);
        TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
                                            bitsize_int (nunits), 0);

        /* Always naturally align vectors.  This prevents ABI changes
           depending on whether or not native vector modes are supported.  */
        TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
        break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      SET_TYPE_MODE (type, VOIDmode);
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
         but ptrdiff_t must be integral.  */
      SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
      TYPE_PRECISION (type) = POINTER_SIZE;
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
         be, but we do know the alignment is FUNCTION_BOUNDARY, so
         make it consistent with that.  */
      SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        enum machine_mode mode = TYPE_MODE (type);
        if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
          {
            addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
            mode = targetm.addr_space.address_mode (as);
          }

        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
        TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
      }
      break;

    case ARRAY_TYPE:
      {
        tree index = TYPE_DOMAIN (type);
        tree element = TREE_TYPE (type);

        build_pointer_type (element);

        /* We need to know both bounds in order to compute the size.  */
        if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
            && TYPE_SIZE (element))
          {
            tree ub = TYPE_MAX_VALUE (index);
            tree lb = TYPE_MIN_VALUE (index);
            tree element_size = TYPE_SIZE (element);
            tree length;

            /* Make sure that an array of zero-sized elements is zero-sized
               regardless of its extent.  */
            if (integer_zerop (element_size))
              length = size_zero_node;

            /* The initial subtraction should happen in the original type so
               that (possible) negative values are handled appropriately.  */
            else
              length
                = size_binop (PLUS_EXPR, size_one_node,
                              fold_convert (sizetype,
                                            fold_build2_loc (input_location,
                                                             MINUS_EXPR,
                                                             TREE_TYPE (lb),
                                                             ub, lb)));

            TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
                                           fold_convert (bitsizetype,
                                                         length));

            /* If we know the size of the element, calculate the total size
               directly, rather than do some division thing below.  This
               optimization helps Fortran assumed-size arrays (where the
               size of the array is determined at runtime) substantially.  */
            if (TYPE_SIZE_UNIT (element))
              TYPE_SIZE_UNIT (type)
                = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
          }

        /* Now round the alignment and size,
           using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
        TYPE_ALIGN (type)
          = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
        TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
        if (!TYPE_SIZE (element))
          /* We don't know the size of the underlying element type, so
             our alignment calculations will be wrong, forcing us to
             fall back on structural equality.  */
          SET_TYPE_STRUCTURAL_EQUALITY (type);
        TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
        SET_TYPE_MODE (type, BLKmode);
        if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
            && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
            /* BLKmode elements force BLKmode aggregate;
               else extract/store fields may lose.  */
            && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
                || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
          {
            /* One-element arrays get the component type's mode.  */
            if (simple_cst_equal (TYPE_SIZE (type),
                                  TYPE_SIZE (TREE_TYPE (type))))
              SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
            else
              SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
                                                       MODE_INT, 1));

            if (TYPE_MODE (type) != BLKmode
                && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
                && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
              {
                TYPE_NO_FORCE_BLK (type) = 1;
                SET_TYPE_MODE (type, BLKmode);
              }
          }
        /* When the element size is constant, check that it is at least as
           large as the element alignment.  */
        if (TYPE_SIZE_UNIT (element)
            && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
            /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
               TYPE_ALIGN_UNIT.  */
            && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
            && !integer_zerop (TYPE_SIZE_UNIT (element))
            && compare_tree_int (TYPE_SIZE_UNIT (element),
                                 TYPE_ALIGN_UNIT (element)) < 0)
          error ("alignment of array elements is greater than element size");
        break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        tree field;
        record_layout_info rli;

        /* Initialize the layout information.  */
        rli = start_record_layout (type);

        /* If this is a QUAL_UNION_TYPE, we want to process the fields
           in the reverse order in building the COND_EXPR that denotes
           its size.  We reverse them again later.  */
        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Place all the fields.  */
        for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
          place_field (rli, field);

        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Finish laying out the record.  */
        finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* We should never see alias sets on incomplete aggregates.  Nor
     should layout_type be called on an aggregate that is already
     complete.  */
  if (AGGREGATE_TYPE_P (type))
    gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}
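
/* Usage sketch (illustrative only): a front end typically builds a bare
   type node, fills in the fields layout_type needs, and then calls it.
   For example, a 24-bit signed integer type:  */
#if 0
  tree t = make_node (INTEGER_TYPE);

  TYPE_PRECISION (t) = 24;
  set_min_and_max_values_for_integral_type (t, 24, /*is_unsigned=*/false);
  layout_type (t);
  /* On most targets t now has SImode, TYPE_SIZE = 32 bits and
     TYPE_SIZE_UNIT = 4, since smallest_mode_for_size rounds the
     24-bit precision up to the next integer mode.  */
#endif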

/* Vector types need to re-check the target flags each time we report
   the machine mode.  We need to do this because attribute target can
   change the result of vector_mode_supported_p and have_regs_of_mode
   on a per-function basis.  Thus the TYPE_MODE of a VECTOR_TYPE can
   change on a per-function basis.  */
/* ??? Possibly a better solution is to run through all the types
   referenced by a function and re-compute the TYPE_MODE once, rather
   than make the TYPE_MODE macro call a function.  */

enum machine_mode
vector_type_mode (const_tree t)
{
  enum machine_mode mode;

  gcc_assert (TREE_CODE (t) == VECTOR_TYPE);

  mode = t->type.mode;
  if (VECTOR_MODE_P (mode)
      && (!targetm.vector_mode_supported_p (mode)
          || !have_regs_of_mode[mode]))
    {
      enum machine_mode innermode = TREE_TYPE (t)->type.mode;

      /* For integers, try mapping it to a same-sized scalar mode.  */
      if (GET_MODE_CLASS (innermode) == MODE_INT)
        {
          mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
                                * GET_MODE_BITSIZE (innermode), MODE_INT, 0);

          if (mode != VOIDmode && have_regs_of_mode[mode])
            return mode;
        }

      return BLKmode;
    }

  return mode;
}
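
/* Worked example (illustrative only): consider a 4 x 32-bit integer
   vector type in a function where the target does not support V4SImode
   (say, SSE disabled via attribute target).  The fallback above asks
   mode_for_size for a 128-bit MODE_INT; if the target has TImode
   registers the vector is handled as TImode, otherwise as BLKmode.  */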

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}
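
/* Usage sketch (illustrative only):

     tree s7  = make_signed_type (7);     TYPE_MIN/MAX = -64 .. 63
     tree u24 = make_unsigned_type (24);  TYPE_MIN/MAX = 0 .. 16777215

   Both come back fully laid out.  The precision need not match a
   hardware width; the mode is simply the smallest MODE_INT that
   holds it.  */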

/* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
   and SATP.  */

tree
make_fract_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
  layout_type (type);

  return type;
}

/* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
   and SATP.  */

tree
make_accum_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
  layout_type (type);

  return type;
}
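
/* Usage sketch (illustrative only): make_fract_type (16, 1, 1) yields
   the type behind `_Sat unsigned _Fract' on targets with a 16-bit
   unsigned fractional mode, while make_accum_type (32, 0, 0)
   corresponds to a plain signed `_Accum'.  The exact modes chosen are
   target-specific.  */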

/* Initialize sizetype and bitsizetype to a reasonable and temporary
   value to enable integer types to be created.  */

void
initialize_sizetypes (void)
{
  tree t = make_node (INTEGER_TYPE);
  int precision = GET_MODE_BITSIZE (SImode);

  SET_TYPE_MODE (t, SImode);
  TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
  TYPE_IS_SIZETYPE (t) = 1;
  TYPE_UNSIGNED (t) = 1;
  TYPE_SIZE (t) = build_int_cst (t, precision);
  TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
  TYPE_PRECISION (t) = precision;

  set_min_and_max_values_for_integral_type (t, precision, true);

  sizetype = t;
  bitsizetype = build_distinct_type_copy (t);
}

/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
   We do this by overwriting the stub sizetype and bitsizetype nodes created
   by initialize_sizetypes.  This makes sure that (a) anything stubby about
   them no longer exists and (b) any INTEGER_CSTs created with such a type
   remain valid.  */

void
set_sizetype (tree type)
{
  tree t, max;
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
     precision.  */
  int precision
    = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
  precision
    = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  /* sizetype must be an unsigned type.  */
  gcc_assert (TYPE_UNSIGNED (type));

  t = build_distinct_type_copy (type);
  /* We want to use sizetype's cache, as we will be replacing that type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;
  TYPE_CANONICAL (sizetype) = sizetype;

  /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
     sign-extended in a way consistent with force_fit_type.  */
  max = TYPE_MAX_VALUE (sizetype);
  TYPE_MAX_VALUE (sizetype)
    = double_int_to_tree (sizetype, tree_to_double_int (max));

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We want to use bitsizetype's cache, as we will be replacing that type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
  TYPE_CANONICAL (bitsizetype) = bitsizetype;

  fixup_unsigned_type (bitsizetype);

  /* Create the signed variants of *sizetype.  */
  ssizetype = make_signed_type (oprecision);
  TYPE_IS_SIZETYPE (ssizetype) = 1;
  sbitsizetype = make_signed_type (precision);
  TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
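
/* Worked example (illustrative only): for a 32-bit sizetype,
   oprecision = 32 and BITS_PER_UNIT_LOG = 3, so the raw bitsizetype
   precision is 32 + 3 + 1 = 36 bits -- enough to express any byte size
   as a signed count of bits.  smallest_mode_for_size then rounds that
   up to the next integer mode, typically 64 bits (DImode).  */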

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
   or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
                                          int precision,
                                          bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
        = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
                              ? -1
                              : ((HOST_WIDE_INT) 1 << precision) - 1,
                              precision - HOST_BITS_PER_WIDE_INT > 0
                              ? ((unsigned HOST_WIDE_INT) ~0
                                 >> (HOST_BITS_PER_WIDE_INT
                                     - (precision - HOST_BITS_PER_WIDE_INT)))
                              : 0);
    }
  else
    {
      min_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? 0
                               : (HOST_WIDE_INT) (-1) << (precision - 1)),
                              (((HOST_WIDE_INT) (-1)
                                << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                                    ? precision - HOST_BITS_PER_WIDE_INT - 1
                                    : 0))));
      max_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? -1
                               : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
                              (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                               ? (((HOST_WIDE_INT) 1
                                   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
                               : 0));
    }

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}
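
/* Worked example (illustrative only): for PRECISION = 7,

     is_unsigned = true:   [0, 127]     (0 .. 2^7 - 1)
     is_unsigned = false:  [-64, 63]    (-2^6 .. 2^6 - 1)

   The double-word arithmetic above only matters once PRECISION
   approaches or exceeds HOST_BITS_PER_WIDE_INT.  */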

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants greater than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants greater than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.

   If VOLATILEP is true, the narrow_volatile_bitfields target hook is used to
   decide which of the above modes should be used.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
               enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if ((bitpos % unit) + bitsize <= unit)
        break;
    }

  if (mode == VOIDmode
      /* It is tempting to omit the following line
         if STRICT_ALIGNMENT is true.
         But that is incorrect, since if the bitfield uses part of 3 bytes
         and we use a 4-byte mode, we could get a spurious segv
         if the extra 4th byte is past the end of memory.
         (Though at least one Unix compiler ignores this problem:
         that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if ((SLOW_BYTE_ACCESS && ! volatilep)
      || (volatilep && !targetm.narrow_volatile_bitfield ()))
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
           tmode = GET_MODE_WIDER_MODE (tmode))
        {
          unit = GET_MODE_BITSIZE (tmode);
          if (bitpos / unit == (bitpos + bitsize - 1) / unit
              && unit <= BITS_PER_WORD
              && unit <= MIN (align, BIGGEST_ALIGNMENT)
              && (largest_mode == VOIDmode
                  || unit <= GET_MODE_BITSIZE (largest_mode)))
            wide_mode = tmode;
        }

      if (wide_mode != VOIDmode)
        return wide_mode;
    }

  return mode;
}
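
/* Worked example (illustrative only): for a bit field with BITSIZE = 6
   at BITPOS = 4 in a 32-bit-aligned object, QImode fails the
   narrowest-mode test (4 % 8 + 6 = 10 > 8) but HImode passes
   (4 % 16 + 6 = 10 <= 16), so with !SLOW_BYTE_ACCESS and !VOLATILEP

     get_best_mode (6, 4, 32, VOIDmode, 0)

   returns HImode.  With SLOW_BYTE_ACCESS it would instead return the
   widest word-sized mode that still covers the field in one unit.  */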

/* Gets minimal and maximal values for MODE (signed or unsigned depending on
   SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
                 enum machine_mode target_mode,
                 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}
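
/* Usage sketch (illustrative only):

     rtx lo, hi;
     get_mode_bounds (QImode, 1, SImode, &lo, &hi);

   leaves lo as (const_int -128) and hi as (const_int 127), both
   already valid as SImode operands.  */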

#include "gt-stor-layout.h"