/* Dwarf2 Call Frame Information helper routines.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "stor-layout.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
#include "common/common-target.h"

#include "except.h"		/* expand_builtin_dwarf_sp_column */
#include "profile-count.h"	/* For expr.h */
#include "expr.h"		/* init_return_column_size */
#include "output.h"		/* asm_out_file */
#include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  */
  cfi_vec reg_save;
};

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
};
/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
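
/* For illustration only (not part of the original sources): in a function
   laid out as

       foo:   prologue insns
	      ...
	      je .L2
	      ...   epilogue, return
       .L2:   ...   epilogue, return

   the label .L2 and the NOTE_INSN_PROLOGUE_END / NOTE_INSN_EPILOGUE_BEG
   notes are save points, so the body splits into a handful of traces.  The
   row state at the end of the prologue trace (the CFA rule plus the saved
   register columns) is propagated across the conditional jump to the trace
   starting at .L2, and with checking enabled we verify that every edge into
   .L2 carries the same row.  */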
/* Hashtable helpers.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info *> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;
/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

static GTY(()) reg_saved_in_data *cie_return_save;

static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;
/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
};

static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
/* MEM is a memory reference for the register size table, each element of
   which has mode MODE.  Initialize column C as a return address column.  */

static void
init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
{
  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
  emit_move_insn (adjust_address (mem, mode, offset),
		  gen_int_mode (size, mode));
}
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];
};
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  init_state->processed_regno[regno] = true;

  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */
      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
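
/* An illustrative sketch, not part of the original file: the code expanded
   here backs __builtin_init_dwarf_reg_size_table, which the libgcc unwinder
   uses roughly like

       unsigned char dwarf_reg_size_table[__LIBGCC_DWARF_FRAME_REGISTERS__ + 1];
       __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);

   so that each unwind column maps to the byte size of the register stored
   in it.  The array name and bound above follow libgcc's conventions but
   are shown only as an example of the caller's side.  */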
static dw_trace_info *
get_trace_info (rtx_insn *insn)
{
  dw_trace_info dummy;
  dummy.head = insn;
  return trace_index->find_with_hash (&dummy, INSN_UID (insn));
}

static bool
save_point_p (rtx_insn *insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
	return true;
      }

  return false;
}
/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */

static inline HOST_WIDE_INT
div_data_align (HOST_WIDE_INT off)
{
  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
  return r;
}

/* Return true if we need a signed version of a given opcode
   (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */

static inline bool
need_data_align_sf_opcode (HOST_WIDE_INT off)
{
  return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
}
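
/* A worked example, for illustration only: with the common setting
   DWARF_CIE_DATA_ALIGNMENT == -8 on a 64-bit target whose stack grows
   downward, a register saved at CFA-16 has OFF == -16 and div_data_align
   yields the factored value 2, so the unsigned DW_CFA_offset form suffices.
   A save at a positive offset from the CFA (OFF > 0) would factor to a
   negative value, which is exactly when need_data_align_sf_opcode asks for
   the signed-factored (_sf) opcode instead.  */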
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
/* Generate a new label for the CFI info to refer to.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[MAX_ARTIFICIAL_LABEL_BYTES];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
static void
add_cfi_restore (unsigned reg)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  add_cfi (cfi);
}
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
  struct dw_loc_descr_node *ptr;

  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	case DW_OP_reg0 ... DW_OP_reg31:
	  cfa->reg = op - DW_OP_reg0;
	  break;
	case DW_OP_regx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_breg0 ... DW_OP_breg31:
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  break;
	}
    }
}
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
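
/* For illustration only: given the CFI sequence

       DW_CFA_def_cfa          r7, 8    ;; CFA = r7 + 8
       DW_CFA_def_cfa_register r6       ;; CFA = r6 + 8
       DW_CFA_def_cfa_offset   16       ;; CFA = r6 + 16

   iterating lookup_cfa_1 over the three opcodes leaves *LOC describing
   r6 + 16, and a DW_CFA_remember_state / DW_CFA_restore_state pair
   bracketing further changes would snapshot and reinstate that location
   via *REMEMBER.  The register numbers are arbitrary examples.  */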
/* Determine if two dw_cfa_location structures define the same data.  */

bool
cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
{
  return (loc1->reg == loc2->reg
	  && loc1->offset == loc2->offset
	  && loc1->indirect == loc2->indirect
	  && (loc1->indirect == 0
	      || loc1->base_offset == loc2->base_offset));
}
/* Determine if two CFI operands are identical.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
/* Determine if two CFI entries are identical.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is -1, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 there.  */
      add_cfi_args_size (args_size);
    }
}
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
  return DWARF_FRAME_REGNUM (REGNO (reg));
}

/* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */

static bool
compare_reg_or_pc (rtx x, rtx y)
{
  if (REG_P (x) && REG_P (y))
    return REGNO (x) == REGNO (y);
  return x == y;
}
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      record_reg_saved_in_reg (q->saved_reg, q->reg);

      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);

      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;

      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
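
/* For illustration only: a prologue that saves r12 at CFA-16 and r13 at
   CFA-24 queues both saves while scanning, and the flush at the end of the
   prologue emits

       DW_CFA_offset r12, <factored -16>
       DW_CFA_offset r13, <factored -24>

   at a single point, so only one location advance is needed instead of one
   per save instruction.  Register numbers and offsets are arbitrary
   examples.  */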
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      if (modified_in_p (q->reg, insn))
	return true;

      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
/* What register, if any, is currently saved in REG?  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */

static void
dwarf2out_frame_debug_cfa_offset (rtx set)
{
  HOST_WIDE_INT offset;
  rtx src, addr, span;
  unsigned int sregno;

  src = XEXP (set, 1);
  addr = XEXP (set, 0);
  gcc_assert (MEM_P (addr));
  addr = XEXP (addr, 0);

  /* As documented, only consider extremely simple addresses.  */
  switch (GET_CODE (addr))
    {
    case REG:
      gcc_assert (dwf_regno (addr) == cur_cfa->reg);
      offset = -cur_cfa->offset;
      break;
    case PLUS:
      gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
      offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
      break;
    default:
      gcc_unreachable ();
    }

  if (src == pc_rtx)
    {
      span = NULL;
      sregno = DWARF_FRAME_RETURN_COLUMN;
    }
  else
    {
      span = targetm.dwarf_register_span (src);
      sregno = dwf_regno (src);
    }

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  if (!span)
    reg_save (sregno, INVALID_REGNUM, offset);
  else
    {
      /* We have a PARALLEL describing where the contents of SRC live.
	 Adjust the offset for each piece of the PARALLEL.  */
      HOST_WIDE_INT span_offset = offset;

      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
	{
	  rtx elem = XVECEXP (span, 0, par_index);
	  sregno = dwf_regno (elem);
	  reg_save (sregno, INVALID_REGNUM, span_offset);
	  span_offset += GET_MODE_SIZE (GET_MODE (elem));
	}
    }
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */

static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno, dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  record_reg_saved_in_reg (dest, src);
  if (src == pc_rtx)
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_regno (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
   note.  */

static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */

static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  gcc_assert (REG_P (reg));

  rtx span = targetm.dwarf_register_span (reg);
  if (!span)
    {
      unsigned int regno = dwf_regno (reg);
      add_cfi_restore (regno);
      update_row_reg_save (cur_row, regno, NULL);
    }
  else
    {
      /* We have a PARALLEL describing where the contents of REG live.
	 Restore the register for each piece of the PARALLEL.  */
      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
	{
	  reg = XVECEXP (span, 0, par_index);
	  gcc_assert (REG_P (reg));
	  unsigned int regno = dwf_regno (reg);
	  add_cfi_restore (regno);
	  update_row_reg_save (cur_row, regno, NULL);
	}
    }
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
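
/* An illustrative sketch, not from the original sources, of how a backend
   hands work to the REG_CFA_* handlers above: a port emitting a hand-built
   frame insn attaches a note to it, for example

       rtx_insn *insn = emit_insn (gen_my_stack_adjust ());  <- hypothetical pattern
       RTX_FRAME_RELATED_P (insn) = 1;
       add_reg_note (insn, REG_CFA_DEF_CFA,
		     plus_constant (Pmode, stack_pointer_rtx, 32));

   and dwarf2out_frame_debug below dispatches on the note kind instead of
   interpreting the insn pattern itself.  gen_my_stack_adjust is a made-up
   name; RTX_FRAME_RELATED_P, add_reg_note and REG_CFA_DEF_CFA are the real
   interfaces.  */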
/* Record call frame debugging information for an expression EXPR,
   which either sets SP or FP (adjusting how we calculate the frame
   address) or saves a register to the stack or another register.
   LABEL indicates the address of EXPR.

   This function encodes a state machine mapping rtxes to actions on
   cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
   users need not read the source code.

  The High-Level Picture

  Changes in the register we use to calculate the CFA:  Currently we
  assume that if you copy the CFA register into another register, we
  should take the other one as the new CFA register; this seems to
  work pretty well.  If it's wrong for some target, it's simple
  enough not to set RTX_FRAME_RELATED_P on the insn in question.

  Changes in the register we use for saving registers to the stack:
  This is usually SP, but not always.  Again, we deduce that if you
  copy SP into another register (and SP is not the CFA register),
  then the new register is the one we will be using for register
  saves.  This also seems to work.

  Register saves: There's not much guesswork about this one; if
  RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
  register save, and the register used to calculate the destination
  had better be the one we think we're using for this purpose.
  It's also assumed that a copy from a call-saved register to another
  register is saving that register if RTX_FRAME_RELATED_P is set on
  that instruction.  If the copy is from a call-saved register to
  the *same* register, that means that the register is now the same
  value as in the caller.

  Except: If the register being saved is the CFA register, and the
  offset is nonzero, we are saving the CFA, so we assume we have to
  use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
  the intent is to save the value of SP from the previous frame.

  In addition, if a register has previously been saved to a different
  register,

  Invariants / Summaries of Rules

  cfa	       current rule for calculating the CFA.  It usually
	       consists of a register and an offset.  This is
	       actually stored in *cur_cfa, but abbreviated
	       for the purposes of this documentation.
  cfa_store    register used by prologue code to save things to the stack
	       cfa_store.offset is the offset from the value of
	       cfa_store.reg to the actual CFA
  cfa_temp     register holding an integral value.  cfa_temp.offset
	       stores the value, which will be used to adjust the
	       stack pointer.  cfa_temp is also used like cfa_store,
	       to track stores to the stack via fp or a temp reg.

  Rules  1- 4: Setting a register's value to cfa.reg or an expression
	       with cfa.reg as the first operand changes the cfa.reg and its
	       cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
	       cfa_temp.offset.

  Rules  6- 9: Set a non-cfa.reg register value to a constant or an
	       expression yielding a constant.  This sets cfa_temp.reg
	       and cfa_temp.offset.

  Rule 5:      Create a new register cfa_store used to save items to the
	       stack.

  Rules 10-14: Save a register to the stack.  Define offset as the
	       difference of the original location and cfa_store's
	       location (or cfa_temp's location if cfa_temp is used).

  Rules 16-20: If AND operation happens on sp in prologue, we assume
	       stack is realigned.  We will use a group of DW_OP_XXX
	       expressions to represent the location of the stored
	       register instead of CFA+offset.

  The Rules

  "{a,b}" indicates a choice of a xor b.
  "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.

  Rule 1:
  (set <reg1> <reg2>:cfa.reg)
  effects: cfa.reg = <reg1>
	   cfa.offset unchanged
	   cfa_temp.reg = <reg1>
	   cfa_temp.offset = cfa.offset

  Rule 2:
  (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
			      {<const_int>,<reg>:cfa_temp.reg}))
  effects: cfa.reg = sp if fp used
	   cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
	   cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
	     if cfa_store.reg==sp

  Rule 3:
  (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
  effects: cfa.reg = fp
	   cfa_offset += +/- <const_int>

  Rule 4:
  (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
  constraints: <reg1> != fp
	       <reg1> != sp
  effects: cfa.reg = <reg1>
	   cfa_temp.reg = <reg1>
	   cfa_temp.offset = cfa.offset

  Rule 5:
  (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
  constraints: <reg1> != fp
	       <reg1> != sp
  effects: cfa_store.reg = <reg1>
	   cfa_store.offset = cfa.offset - cfa_temp.offset

  Rule 6:
  (set <reg> <const_int>)
  effects: cfa_temp.reg = <reg>
	   cfa_temp.offset = <const_int>

  Rule 7:
  (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
  effects: cfa_temp.reg = <reg1>
	   cfa_temp.offset |= <const_int>

  Rule 8:
  (set <reg> (high <exp>))
  effects: none

  Rule 9:
  (set <reg> (lo_sum <exp> <const_int>))
  effects: cfa_temp.reg = <reg>
	   cfa_temp.offset = <const_int>

  Rule 10:
  (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
  effects: cfa_store.offset -= <const_int>
	   cfa.offset = cfa_store.offset if cfa.reg == sp
	   cfa.reg = sp
	   cfa.base_offset = -cfa_store.offset

  Rule 11:
  (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
  effects: cfa_store.offset += -/+ mode_size(mem)
	   cfa.offset = cfa_store.offset if cfa.reg == sp
	   cfa.reg = sp
	   cfa.base_offset = -cfa_store.offset

  Rule 12:
  (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
       <reg2>)
  effects: cfa.reg = <reg1>
	   cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset

  Rule 13:
  (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
  effects: cfa.reg = <reg1>
	   cfa.base_offset = -{cfa_store,cfa_temp}.offset

  Rule 14:
  (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
  effects: cfa.reg = <reg1>
	   cfa.base_offset = -cfa_temp.offset
	   cfa_temp.offset -= mode_size(mem)

  Rule 15:
  (set <reg> {unspec, unspec_volatile})
  effects: target-dependent

  Rule 16:
  (set sp (and: sp <const_int>))
  constraints: cfa_store.reg == sp
  effects: cfun->fde.stack_realign = 1
	   cfa_store.offset = 0
	   fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp

  Rule 17:
  (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
  effects: cfa_store.offset += -/+ mode_size(mem)

  Rule 18:
  (set (mem ({pre_inc, pre_dec} sp)) fp)
  constraints: fde->stack_realign == 1
  effects: cfa_store.offset = 0
	   cfa.reg != HARD_FRAME_POINTER_REGNUM

  Rule 19:
  (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
  constraints: fde->stack_realign == 1
	       && cfa.offset == 0
	       && cfa.indirect == 0
	       && cfa.reg != HARD_FRAME_POINTER_REGNUM
  effects: Use DW_CFA_def_cfa_expression to define cfa
	   cfa.reg == fde->drap_reg  */
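
/* A worked example, for illustration only (register numbers and offsets are
   arbitrary, and the incoming CFA is assumed to be sp+8, as on a 64-bit
   target where the call pushed the return address):

       (set (mem (pre_dec sp)) fp)           ;; Rule 11: CFA = sp+16,
					     ;;          fp saved at CFA-16
       (set fp sp)                           ;; Rule 1:  CFA = fp+16
       (set sp (minus sp (const_int 32)))    ;; Rule 2:  cfa_store.offset += 32
       (set (mem (plus sp (const_int 8))) r12)
					     ;; Rule 12: r12 saved at CFA-40

   yields the row "CFA = fp+16; fp saved at CFA-16; r12 saved at CFA-40",
   which def_cfa_1 and reg_save translate into DW_CFA_def_cfa_register,
   DW_CFA_offset and friends.  */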
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}

      return;
    }
  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;
	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}
	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;
	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:
      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;
	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;
	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;
	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;
	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Record call frame debugging information for INSN, which either sets
   SP or FP (adjusting how we calculate the frame address) or saves a
   register to the stack.  */

static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	pat = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;
      case REG_CFA_EXPRESSION:
      case REG_CFA_VAL_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);

	if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
	  dwarf2out_frame_debug_cfa_expression (n);
	else
	  dwarf2out_frame_debug_cfa_val_expression (n);

	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_TOGGLE_RA_MANGLE:
      case REG_CFA_WINDOW_SAVE:
	/* We overload both of these operations onto the same DWARF opcode.  */
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere.  */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again.  A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (pat))
	dwarf2out_flush_queued_reg_saves ();
    }
}
/* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */

static void
change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
{
  size_t i, n_old, n_new, n_max;
  dw_cfi_ref cfi;

  if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
    add_cfi (new_row->cfa_cfi);
  else
    {
      cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
      if (cfi)
	add_cfi (cfi);
    }

  n_old = vec_safe_length (old_row->reg_save);
  n_new = vec_safe_length (new_row->reg_save);
  n_max = MAX (n_old, n_new);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_old = NULL, r_new = NULL;

      if (i < n_old)
	r_old = (*old_row->reg_save)[i];
      if (i < n_new)
	r_new = (*new_row->reg_save)[i];

      if (r_old == r_new)
	;
      else if (r_new == NULL)
	add_cfi_restore (i);
      else if (!cfi_equal_p (r_old, r_new))
	add_cfi (r_new);
    }
}
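
/* For illustration only: if OLD_ROW says "CFA = r7+16, r12 saved at CFA-16"
   and NEW_ROW says "CFA = r7+8" with no saved r12, the calls above emit
   DW_CFA_def_cfa_offset 8 followed by DW_CFA_restore r12, i.e. exactly the
   delta between the two rows rather than a full re-description.  Register
   numbers are arbitrary examples.  */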
/* Examine CFI and return true if a cfi label and set_loc is needed
   beforehand.  Even when generating CFI assembler instructions, we
   still have to add the cfi to the list so that lookup_cfa_1 works
   later on.  When -g2 and above we even need to force emitting of
   CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
   purposes.  If we're generating DWARF3 output we use DW_OP_call_frame_cfa
   and so don't use convert_cfa_to_fb_loc_list.  */

static bool
cfi_label_required_p (dw_cfi_ref cfi)
{
  if (!dwarf2out_do_cfi_asm ())
    return true;

  if (dwarf_version == 2
      && debug_info_level > DINFO_LEVEL_TERSE
      && (write_symbols == DWARF2_DEBUG
	  || write_symbols == VMS_AND_DWARF2_DEBUG))
    {
      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_def_cfa_offset:
	case DW_CFA_def_cfa_offset_sf:
	case DW_CFA_def_cfa_register:
	case DW_CFA_def_cfa:
	case DW_CFA_def_cfa_sf:
	case DW_CFA_def_cfa_expression:
	case DW_CFA_restore_state:
	  return true;
	default:
	  return false;
	}
    }
  return false;
}
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label.  */
  bool first = true;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}

static void dump_cfi_row (FILE *f, dw_cfi_row *row);
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	     cur_trace->id, ti->id,
	     (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	     (origin ? INSN_UID (origin) : 0));

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }
	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}

/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
         direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
        delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
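
/* Illustrative example of the reset above, assuming a downward-growing
   stack: if the trace ends with end_true_args_size == 16 and the CFA is
   currently SP + 24, the abnormal edge is propagated with args_size 0 and
   CFA SP + 8, since the 16 bytes of pushed arguments are gone on the EH or
   non-local-goto path; the saved args_size and CFA are then restored for
   the normal path.  */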

/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
        return;

      if (tablejump_p (insn, NULL, &table))
        {
          rtvec vec = table->get_labels ();

          n = GET_NUM_ELEM (vec);
          for (i = 0; i < n; ++i)
            {
              rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
              maybe_record_trace_start (lab, insn);
            }
        }
      else if (computed_jump_p (insn))
        {
          rtx_insn *temp;
          unsigned int i;
          FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
            maybe_record_trace_start (temp, insn);
        }
      else if (returnjump_p (insn))
        ;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
        {
          n = ASM_OPERANDS_LABEL_LENGTH (tmp);
          for (i = 0; i < n; ++i)
            {
              rtx_insn *lab =
                as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
              maybe_record_trace_start (lab, insn);
            }
        }
      else
        {
          rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
          gcc_assert (lab != NULL);
          maybe_record_trace_start (lab, insn);
        }
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
        return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
        for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
             lab;
             lab = lab->next ())
          maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
        create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
        maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}

/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */

static void
scan_insn_after (rtx_insn *insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}

/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
             trace->id, rtx_name[(int) GET_CODE (insn)],
             INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
        {
          /* Don't bother saving the unneeded queued registers at all.  */
          queued_reg_saves.truncate (0);
          break;
        }
      if (save_point_p (insn))
        {
          /* Propagate across fallthru edges.  */
          dwarf2out_flush_queued_reg_saves ();
          maybe_record_trace_start (insn, NULL);
          break;
        }

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
        continue;

      /* Handle all changes to the row state.  Sequences require special
         handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
        {
          rtx_insn *elt;
          int i, n = pat->len ();

          control = pat->insn (0);
          if (can_throw_internal (control))
            notice_eh_throw (control);
          dwarf2out_flush_queued_reg_saves ();

          if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
            {
              /* ??? Hopefully multiple delay slots are not annulled.  */
              gcc_assert (n == 2);
              gcc_assert (!RTX_FRAME_RELATED_P (control));
              gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

              elt = pat->insn (1);

              if (INSN_FROM_TARGET_P (elt))
                {
                  HOST_WIDE_INT restore_args_size;
                  cfi_vec save_row_reg_save;

                  /* If ELT is an instruction from target of an annulled
                     branch, the effects are for the target only and so
                     the args_size and CFA along the current path
                     shouldn't change.  */
                  add_cfi_insn = NULL;
                  restore_args_size = cur_trace->end_true_args_size;
                  cur_cfa = &cur_row->cfa;
                  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

                  scan_insn_after (elt);

                  /* ??? Should we instead save the entire row state?  */
                  gcc_assert (!queued_reg_saves.length ());

                  create_trace_edges (control);

                  cur_trace->end_true_args_size = restore_args_size;
                  cur_row->cfa = this_cfa;
                  cur_row->reg_save = save_row_reg_save;
                  cur_cfa = &this_cfa;
                }
              else
                {
                  /* If ELT is an annulled branch-taken instruction (i.e.
                     executed only when branch is not taken), the args_size
                     and CFA should not change through the jump.  */
                  create_trace_edges (control);

                  /* Update and continue with the trace.  */
                  add_cfi_insn = insn;
                  scan_insn_after (elt);
                  def_cfa_1 (&this_cfa);
                }
              continue;
            }

          /* The insns in the delay slot should all be considered to happen
             "before" a call insn.  Consider a call with a stack pointer
             adjustment in the delay slot.  The backtrace from the callee
             should include the sp adjustment.  Unfortunately, that leaves
             us with an unavoidable unwinding error exactly at the call insn
             itself.  For jump insns we'd prefer to avoid this error by
             placing the notes after the sequence.  */
          if (JUMP_P (control))
            add_cfi_insn = insn;

          for (i = 1; i < n; ++i)
            {
              elt = pat->insn (i);
              scan_insn_after (elt);
            }

          /* Make sure any register saves are visible at the jump target.  */
          dwarf2out_flush_queued_reg_saves ();
          any_cfis_emitted = false;

          /* However, if there is some adjustment on the call itself, e.g.
             a call_pop, that action should be considered to happen after
             the call returns.  */
          add_cfi_insn = insn;
          scan_insn_after (control);
        }
      else
        {
          /* Flush data before calls and jumps, and of course if necessary.  */
          if (can_throw_internal (insn))
            {
              notice_eh_throw (insn);
              dwarf2out_flush_queued_reg_saves ();
            }
          else if (!NONJUMP_INSN_P (insn)
                   || clobbers_queued_reg_save (insn)
                   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
            dwarf2out_flush_queued_reg_saves ();
          any_cfis_emitted = false;

          add_cfi_insn = insn;
          scan_insn_after (insn);
          control = insn;
        }

      /* Between frame-related-p and args_size we might have otherwise
         emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
         once anything is emitted.  */
      if (any_cfis_emitted
          || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
        dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
         same tests as are done to actually create the edges.  So
         always call the routine and let it not create edges for
         non-control-flow insns.  */
      create_trace_edges (control);
    }

  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}

/* Scan the function and create the initial set of CFI notes.  */

static void
create_cfi_notes (void)
{
  dw_trace_info *ti;

  gcc_checking_assert (!queued_reg_saves.exists ());
  gcc_checking_assert (!trace_work_list.exists ());

  /* Always begin at the entry trace.  */
  ti = &trace_info[0];
  scan_trace (ti);

  while (!trace_work_list.is_empty ())
    {
      ti = trace_work_list.pop ();
      scan_trace (ti);
    }

  queued_reg_saves.release ();
  trace_work_list.release ();
}

/* Return the insn before the first NOTE_INSN_CFI after START.  */

static rtx_insn *
before_next_cfi_note (rtx_insn *start)
{
  rtx_insn *prev = start;
  while (start)
    {
      if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
        return prev;
      prev = start;
      start = NEXT_INSN (start);
    }
  gcc_unreachable ();
}

/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
        {
          trace_info.ordered_remove (i);
          n -= 1;
        }
      else
        gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
         for the portion of the function in the alternate text
         section.  The row state at the very beginning of that
         new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
        old_row = cie_cfi_row;
      else
        {
          old_row = prev_ti->end_row;
          /* If there's no change from the previous end state, fine.  */
          if (cfi_row_equal_p (old_row, ti->beg_row))
            ;
          /* Otherwise check for the common case of sharing state with
             the beginning of an epilogue, but not the end.  Insert
             remember/restore opcodes in that case.  */
          else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
            {
              dw_cfi_ref cfi;

              /* Note that if we blindly insert the remember at the
                 start of the trace, we can wind up increasing the
                 size of the unwind info due to extra advance opcodes.
                 Instead, put the remember immediately before the next
                 state change.  We know there must be one, because the
                 state at the beginning and head of the trace differ.  */
              add_cfi_insn = before_next_cfi_note (prev_ti->head);
              cfi = new_cfi ();
              cfi->dw_cfi_opc = DW_CFA_remember_state;
              add_cfi (cfi);

              add_cfi_insn = ti->head;
              cfi = new_cfi ();
              cfi->dw_cfi_opc = DW_CFA_restore_state;
              add_cfi (cfi);

              old_row = prev_ti->beg_row;
            }
          /* Otherwise, we'll simply change state from the previous end.  */
        }

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
        {
          rtx_insn *note;

          fprintf (dump_file, "Fixup between trace %u and %u:\n",
                   prev_ti->id, ti->id);

          note = ti->head;
          do
            {
              note = NEXT_INSN (note);
              gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
              output_cfi_directive (dump_file, NOTE_CFI (note));
            }
          while (note != add_cfi_insn);
        }
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
        {
          ti = &trace_info[i];

          if (ti->switch_sections)
            prev_args_size = 0;
          if (ti->eh_head == NULL)
            continue;
          gcc_assert (!ti->args_size_undefined);

          if (ti->beg_delay_args_size != prev_args_size)
            {
              /* ??? Search back to previous CFI note.  */
              add_cfi_insn = PREV_INSN (ti->eh_head);
              add_cfi_args_size (ti->beg_delay_args_size);
            }

          prev_args_size = ti->end_delay_args_size;
        }
    }
}
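
/* Illustration of the remember/restore case above: when the trace before TI
   starts with the same row that TI needs but ends differently (the typical
   shrink-wrapped epilogue), we plant DW_CFA_remember_state just before that
   trace's first state change and DW_CFA_restore_state at TI's head, which is
   usually much smaller than re-describing the whole row.  */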

/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
        saw_barrier = true;
      else if (NOTE_P (insn)
               && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
        {
          /* We should have just seen a barrier.  */
          gcc_assert (saw_barrier);
          switch_sections = true;
        }
      /* Watch out for save_point notes between basic blocks.
         In particular, a note after a barrier.  Do not record these,
         delaying trace creation until the label.  */
      else if (save_point_p (insn)
               && (LABEL_P (insn) || !saw_barrier))
        {
          memset (&ti, 0, sizeof (ti));
          ti.head = insn;
          ti.switch_sections = switch_sections;
          ti.id = trace_info.length ();
          trace_info.safe_push (ti);

          saw_barrier = false;
          switch_sections = false;
        }
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
        fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
                 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
                 tp->switch_sections ? " (section switch)" : "");

      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}

/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
        {
        case REG:
          gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
          offset = 0;
          break;

        case PLUS:
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
          offset = INTVAL (XEXP (rtl, 1));
          break;

        case MINUS:
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
          offset = -INTVAL (XEXP (rtl, 1));
          break;

        default:
          gcc_unreachable ();
        }

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
         actually load.  For instance, on the SPARC it is in %i7+8.  Just
         ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);

      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
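
/* For example, on SPARC the incoming return address is %i7+8, i.e. a PLUS
   of a register and a constant; the PLUS case above asserts the offset is a
   constant, drops it, and recurses on the bare register, since the offset is
   irrelevant when unwinding frames.  */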

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
         register, but choose a different return column.  This will result
         in a DW_CFA_register for the return, and an entry in
         regs_saved_in_regs to match.  If the target later stores that
         return address register to the stack, we want to be able to emit
         the DW_CFA_offset against the return column, not the intermediate
         save register.  Save the contents of regs_saved_in_regs so that
         we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
        {
        case 0:
          break;
        case 1:
          cie_return_save = ggc_alloc<reg_saved_in_data> ();
          *cie_return_save = cie_trace.regs_saved_in_regs[0];
          cie_trace.regs_saved_in_regs.release ();
          break;
        default:
          gcc_unreachable ();
        }
    }

  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
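
/* Illustration: the CIE row built above only states that the CFA is at
   SP + INCOMING_FRAME_SP_OFFSET and where INCOMING_RETURN_ADDR_RTX puts the
   return address.  On x86_64, for instance, that amounts to
   DW_CFA_def_cfa %rsp, 8 plus a DW_CFA_offset of the return column at CFA-8,
   because the call instruction pushed the return address.  */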

/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}

/* Convert a DWARF call frame info operation to its string name.  */

static const char *
dwarf_cfi_name (unsigned int cfi_opc)
{
  const char *name = get_DW_CFA_name (cfi_opc);

  if (name != NULL)
    return name;

  return "DW_CFA_<unknown>";
}

/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      unsigned r =
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}

/* Similar, but used for .cfi_escape.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      unsigned r =
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}

/* Output a Call Frame Information opcode and its operand(s).  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
                             | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
                         "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
                         ((unsigned HOST_WIDE_INT)
                          cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
                           "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
                           "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
                           "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
        {
        case DW_CFA_set_loc:
          if (for_eh)
            dw2_asm_output_encoded_addr_rtx (
                ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
                gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
                false, NULL);
          else
            dw2_asm_output_addr (DWARF2_ADDR_SIZE,
                                 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc1:
          dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc2:
          dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc4:
          dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_MIPS_advance_loc8:
          dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_offset_extended:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_uleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
          break;

        case DW_CFA_offset_extended_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_restore_extended:
        case DW_CFA_undefined:
        case DW_CFA_same_value:
        case DW_CFA_def_cfa_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_def_cfa_offset:
        case DW_CFA_GNU_args_size:
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
          break;

        case DW_CFA_def_cfa_offset_sf:
          off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_GNU_window_save:
          break;

        case DW_CFA_def_cfa_expression:
        case DW_CFA_expression:
        case DW_CFA_val_expression:
          output_cfa_loc (cfi, for_eh);
          break;

        case DW_CFA_GNU_negative_offset_extended:
          /* Obsoleted by DW_CFA_offset_extended_sf.  */
          gcc_unreachable ();

        default:
          break;
        }
    }
}
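
/* Note on the compact forms handled first above: DW_CFA_advance_loc,
   DW_CFA_offset and DW_CFA_restore encode their first operand in the low
   six bits of the opcode byte itself (opcode | (operand & 0x3f)), so they
   are emitted as a single byte, followed for DW_CFA_offset by a ULEB128
   data-aligned offset.  */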

/* Similar, but do it via assembler directives instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
         via directives.  The assembler is going to take care of this for
         us.  But this routine is also used for debugging dumps, so
         print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC "\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC "\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
               HOST_WIDE_INT_PRINT_DEC "\n",
               cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      if (f == asm_out_file)
        {
          fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
          dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          if (flag_debug_asm)
            fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
                     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
          fprintf (f, "\n");
        }
      else
        fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
                 cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
                   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
                   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
          break;
        }
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
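
/* For illustration, a conventional frame-pointer prologue typically produces
   a directive sequence along these lines (the register number is
   target-specific; 6 is %rbp in the x86_64 numbering):

        .cfi_def_cfa_offset 16
        .cfi_offset 6, -16
        .cfi_def_cfa_register 6

   which the assembler converts back into the corresponding DW_CFA opcodes
   when it builds the unwind sections.  */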

void
dwarf2out_emit_cfi (dw_cfi_ref cfi)
{
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
}

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}

void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}

/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;

/* Decide whether we want to emit frame unwind information for the current
   translation unit.  */

bool
dwarf2out_do_frame (void)
{
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return true;

  if (saved_do_cfi_asm > 0)
    return true;

  if (targetm.debug_unwind_info () == UI_DWARF2)
    return true;

  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    return true;

  return false;
}

/* Decide whether to emit frame unwind via assembler directives.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  saved_do_cfi_asm = 1;
  return true;
}
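
/* Note: the encoding checks above accept absolute and pc-relative
   (DW_EH_PE_pcrel) personality/LSDA encodings and reject the data-relative,
   function-relative and aligned variants; in particular, aligned addresses
   cannot be handled by the assembler's CFI directives.  */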

namespace {

const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}

#include "gt-dwarf2cfi.h"