/* Dwarf2 Call Frame Information helper routines.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
#include "common/common-target.h"
#include "tree-pass.h"

#include "except.h"		/* expand_builtin_dwarf_sp_column */
#include "insn-config.h"
#include "expr.h"		/* init_return_column_size */
#include "regs.h"		/* expand_builtin_init_dwarf_reg_sizes */
#include "output.h"		/* asm_out_file */
#include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  */
  cfi_vec reg_save;
} dw_cfi_row;

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct
{
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;
/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */
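
/* Illustrative example (not from the original sources): in a function whose
   insn stream looks roughly like

       NOTE_INSN_PROLOGUE_END     <- save point, begins trace 1
       ...straight-line insns...
       CODE_LABEL 23              <- save point, begins trace 2
       ...insns, conditional jump back to label 23...
       NOTE_INSN_EPILOGUE_BEG     <- save point, begins trace 3

   each marked insn heads its own dw_trace_info, and the CFI row state that
   reaches a trace from every incoming edge must be identical (checked by
   maybe_record_trace_start below).  */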
typedef struct
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;
typedef dw_trace_info *dw_trace_info_ref;
/* Hashtable helpers.  */

struct trace_info_hasher : typed_noop_remove <dw_trace_info>
{
  typedef dw_trace_info *value_type;
  typedef dw_trace_info *compare_type;
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info_ref> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;
/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

static GTY(()) reg_saved_in_data *cie_return_save;

static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */
struct queued_reg_save
{
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
};

static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
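
/* Illustrative use (a hedged sketch of how libgcc's unwinder consumes the
   builtin expanded above; not code from this file):

     _Unwind_SetGRValue (context, __builtin_dwarf_sp_column (), cfa);

   i.e. the constant returned here selects the unwinder column in which the
   stack pointer's value is recorded.  */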
/* MEM is a memory reference for the register size table, each element of
   which has mode MODE.  Initialize column C as a return address column.  */

static void
init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
{
  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
  emit_move_insn (adjust_address (mem, mode, offset),
		  gen_int_mode (size, mode));
}
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

typedef struct
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

} init_one_dwarf_reg_state;
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  init_state->processed_regno[regno] = true;

  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */
      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
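
/* Illustrative sketch (an assumption, not from this file): for a target with
   4-byte general registers and a pointer-sized return column, the expansion
   above behaves roughly like

     unsigned char table[DWARF_FRAME_REGISTERS + 1];
     table[0] = 4;        <- column 0
     table[1] = 4;        <- column 1
     ...
     table[DWARF_FRAME_RETURN_COLUMN] = sizeof (void *);

   emitted as one byte-sized rtl store per unwind column.  */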
static dw_trace_info *
get_trace_info (rtx_insn *insn)
{
  dw_trace_info dummy;
  dummy.head = insn;
  return trace_index->find_with_hash (&dummy, INSN_UID (insn));
}

static bool
save_point_p (rtx_insn *insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
	return true;
      }

  return false;
}
/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */

static inline HOST_WIDE_INT
div_data_align (HOST_WIDE_INT off)
{
  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
  return r;
}

/* Return true if we need a signed version of a given opcode
   (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */

static inline bool
need_data_align_sf_opcode (HOST_WIDE_INT off)
{
  return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
}
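
/* Worked example (illustrative): with DWARF_CIE_DATA_ALIGNMENT == -8,
   div_data_align (-16) returns 2 (and the assert checks 2 * -8 == -16),
   while need_data_align_sf_opcode (16) is true, because a positive offset
   cannot be expressed through a negative data alignment factor with the
   unsigned DW_CFA_offset_extended encoding.  */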
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}

/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
/* Generate a new label for the CFI info to refer to.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[MAX_ARTIFICIAL_LABEL_BYTES];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
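
/* Example (illustrative): each call returns a fresh xstrdup'd internal
   label of the form "LCFI<num>" (e.g. ".LCFI0" on ELF targets, per
   ASM_GENERATE_INTERNAL_LABEL); add_cfis_to_fde later attaches these
   labels to DW_CFA_set_loc / DW_CFA_advance_loc4 opcodes.  */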
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}

static void
add_cfi_restore (unsigned reg)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  add_cfi (cfi);
}
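
/* Example (illustrative): add_cfi_restore (12) can use the compact
   DW_CFA_restore opcode, which encodes registers 0..63 in its low six
   bits, whereas add_cfi_restore (70) must use DW_CFA_restore_extended
   because 70 & ~0x3f is nonzero.  */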
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
529 get_cfa_from_loc_descr (dw_cfa_location
*cfa
, struct dw_loc_descr_node
*loc
)
531 struct dw_loc_descr_node
*ptr
;
533 cfa
->base_offset
= 0;
537 for (ptr
= loc
; ptr
!= NULL
; ptr
= ptr
->dw_loc_next
)
539 enum dwarf_location_atom op
= ptr
->dw_loc_opc
;
575 cfa
->reg
= op
- DW_OP_reg0
;
578 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
612 cfa
->reg
= op
- DW_OP_breg0
;
613 cfa
->base_offset
= ptr
->dw_loc_oprnd1
.v
.val_int
;
616 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
617 cfa
->base_offset
= ptr
->dw_loc_oprnd2
.v
.val_int
;
622 case DW_OP_plus_uconst
:
623 cfa
->offset
= ptr
->dw_loc_oprnd1
.v
.val_unsigned
;
631 /* Find the previous value for the CFA, iteratively. CFI is the opcode
632 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
633 one level of remember/restore state processing. */
636 lookup_cfa_1 (dw_cfi_ref cfi
, dw_cfa_location
*loc
, dw_cfa_location
*remember
)
638 switch (cfi
->dw_cfi_opc
)
640 case DW_CFA_def_cfa_offset
:
641 case DW_CFA_def_cfa_offset_sf
:
642 loc
->offset
= cfi
->dw_cfi_oprnd1
.dw_cfi_offset
;
644 case DW_CFA_def_cfa_register
:
645 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
648 case DW_CFA_def_cfa_sf
:
649 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
650 loc
->offset
= cfi
->dw_cfi_oprnd2
.dw_cfi_offset
;
652 case DW_CFA_def_cfa_expression
:
653 get_cfa_from_loc_descr (loc
, cfi
->dw_cfi_oprnd1
.dw_cfi_loc
);
656 case DW_CFA_remember_state
:
657 gcc_assert (!remember
->in_use
);
659 remember
->in_use
= 1;
661 case DW_CFA_restore_state
:
662 gcc_assert (remember
->in_use
);
664 remember
->in_use
= 0;
672 /* Determine if two dw_cfa_location structures define the same data. */
675 cfa_equal_p (const dw_cfa_location
*loc1
, const dw_cfa_location
*loc2
)
677 return (loc1
->reg
== loc2
->reg
678 && loc1
->offset
== loc2
->offset
679 && loc1
->indirect
== loc2
->indirect
680 && (loc1
->indirect
== 0
681 || loc1
->base_offset
== loc2
->base_offset
));
684 /* Determine if two CFI operands are identical. */
687 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t
, dw_cfi_oprnd
*a
, dw_cfi_oprnd
*b
)
691 case dw_cfi_oprnd_unused
:
693 case dw_cfi_oprnd_reg_num
:
694 return a
->dw_cfi_reg_num
== b
->dw_cfi_reg_num
;
695 case dw_cfi_oprnd_offset
:
696 return a
->dw_cfi_offset
== b
->dw_cfi_offset
;
697 case dw_cfi_oprnd_addr
:
698 return (a
->dw_cfi_addr
== b
->dw_cfi_addr
699 || strcmp (a
->dw_cfi_addr
, b
->dw_cfi_addr
) == 0);
700 case dw_cfi_oprnd_loc
:
701 return loc_descr_equal_p (a
->dw_cfi_loc
, b
->dw_cfi_loc
);
706 /* Determine if two CFI entries are identical. */
709 cfi_equal_p (dw_cfi_ref a
, dw_cfi_ref b
)
711 enum dwarf_call_frame_info opc
;
713 /* Make things easier for our callers, including missing operands. */
716 if (a
== NULL
|| b
== NULL
)
719 /* Obviously, the opcodes must match. */
721 if (opc
!= b
->dw_cfi_opc
)
724 /* Compare the two operands, re-using the type of the operands as
725 already exposed elsewhere. */
726 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc
),
727 &a
->dw_cfi_oprnd1
, &b
->dw_cfi_oprnd1
)
728 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc
),
729 &a
->dw_cfi_oprnd2
, &b
->dw_cfi_oprnd2
));
732 /* Determine if two CFI_ROW structures are identical. */
735 cfi_row_equal_p (dw_cfi_row
*a
, dw_cfi_row
*b
)
737 size_t i
, n_a
, n_b
, n_max
;
741 if (!cfi_equal_p (a
->cfa_cfi
, b
->cfa_cfi
))
744 else if (!cfa_equal_p (&a
->cfa
, &b
->cfa
))
747 n_a
= vec_safe_length (a
->reg_save
);
748 n_b
= vec_safe_length (b
->reg_save
);
749 n_max
= MAX (n_a
, n_b
);
751 for (i
= 0; i
< n_max
; ++i
)
753 dw_cfi_ref r_a
= NULL
, r_b
= NULL
;
756 r_a
= (*a
->reg_save
)[i
];
758 r_b
= (*b
->reg_save
)[i
];
760 if (!cfi_equal_p (r_a
, r_b
))
767 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
768 what opcode to emit. Returns the CFI opcode to effect the change, or
769 NULL if NEW_CFA == OLD_CFA. */
772 def_cfa_0 (dw_cfa_location
*old_cfa
, dw_cfa_location
*new_cfa
)
776 /* If nothing changed, no need to issue any call frame instructions. */
777 if (cfa_equal_p (old_cfa
, new_cfa
))
782 if (new_cfa
->reg
== old_cfa
->reg
&& !new_cfa
->indirect
&& !old_cfa
->indirect
)
784 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
785 the CFA register did not change but the offset did. The data
786 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
787 in the assembler via the .cfi_def_cfa_offset directive. */
788 if (new_cfa
->offset
< 0)
789 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset_sf
;
791 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
792 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= new_cfa
->offset
;
794 else if (new_cfa
->offset
== old_cfa
->offset
795 && old_cfa
->reg
!= INVALID_REGNUM
796 && !new_cfa
->indirect
797 && !old_cfa
->indirect
)
799 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
800 indicating the CFA register has changed to <register> but the
801 offset has not changed. */
802 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_register
;
803 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
805 else if (new_cfa
->indirect
== 0)
807 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
808 indicating the CFA register has changed to <register> with
809 the specified offset. The data factoring for DW_CFA_def_cfa_sf
810 happens in output_cfi, or in the assembler via the .cfi_def_cfa
812 if (new_cfa
->offset
< 0)
813 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_sf
;
815 cfi
->dw_cfi_opc
= DW_CFA_def_cfa
;
816 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= new_cfa
->reg
;
817 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= new_cfa
->offset
;
821 /* Construct a DW_CFA_def_cfa_expression instruction to
822 calculate the CFA using a full location expression since no
823 register-offset pair is available. */
824 struct dw_loc_descr_node
*loc_list
;
826 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_expression
;
827 loc_list
= build_cfa_loc (new_cfa
, 0);
828 cfi
->dw_cfi_oprnd1
.dw_cfi_loc
= loc_list
;
834 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
837 def_cfa_1 (dw_cfa_location
*new_cfa
)
841 if (cur_trace
->cfa_store
.reg
== new_cfa
->reg
&& new_cfa
->indirect
== 0)
842 cur_trace
->cfa_store
.offset
= new_cfa
->offset
;
844 cfi
= def_cfa_0 (&cur_row
->cfa
, new_cfa
);
847 cur_row
->cfa
= *new_cfa
;
848 cur_row
->cfa_cfi
= (cfi
->dw_cfi_opc
== DW_CFA_def_cfa_expression
855 /* Add the CFI for saving a register. REG is the CFA column number.
856 If SREG is -1, the register is saved at OFFSET from the CFA;
857 otherwise it is saved in SREG. */
860 reg_save (unsigned int reg
, unsigned int sreg
, HOST_WIDE_INT offset
)
862 dw_fde_ref fde
= cfun
? cfun
->fde
: NULL
;
863 dw_cfi_ref cfi
= new_cfi ();
865 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
867 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
869 && fde
->stack_realign
870 && sreg
== INVALID_REGNUM
)
872 cfi
->dw_cfi_opc
= DW_CFA_expression
;
873 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
874 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
875 = build_cfa_aligned_loc (&cur_row
->cfa
, offset
,
876 fde
->stack_realignment
);
878 else if (sreg
== INVALID_REGNUM
)
880 if (need_data_align_sf_opcode (offset
))
881 cfi
->dw_cfi_opc
= DW_CFA_offset_extended_sf
;
882 else if (reg
& ~0x3f)
883 cfi
->dw_cfi_opc
= DW_CFA_offset_extended
;
885 cfi
->dw_cfi_opc
= DW_CFA_offset
;
886 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= offset
;
888 else if (sreg
== reg
)
890 /* While we could emit something like DW_CFA_same_value or
891 DW_CFA_restore, we never expect to see something like that
892 in a prologue. This is more likely to be a bug. A backend
893 can always bypass this by using REG_CFA_RESTORE directly. */
898 cfi
->dw_cfi_opc
= DW_CFA_register
;
899 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= sreg
;
update_row_reg_save (cur_row, reg, cfi);
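
/* Examples (illustrative): with DWARF_CIE_DATA_ALIGNMENT == -8,
   reg_save (3, INVALID_REGNUM, -16) records column 3 at CFA-16 and is
   output as DW_CFA_offset 3, 2 (the factoring happens later, in output_cfi
   or in the assembler), while reg_save (3, 12, 0) emits
   DW_CFA_register 3, 12 instead.  */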
906 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
907 and adjust data structures to match. */
910 notice_args_size (rtx_insn
*insn
)
912 HOST_WIDE_INT args_size
, delta
;
915 note
= find_reg_note (insn
, REG_ARGS_SIZE
, NULL
);
919 args_size
= INTVAL (XEXP (note
, 0));
920 delta
= args_size
- cur_trace
->end_true_args_size
;
924 cur_trace
->end_true_args_size
= args_size
;
926 /* If the CFA is computed off the stack pointer, then we must adjust
927 the computation of the CFA as well. */
928 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
930 gcc_assert (!cur_cfa
->indirect
);
932 /* Convert a change in args_size (always a positive in the
933 direction of stack growth) to a change in stack pointer. */
934 if (!STACK_GROWS_DOWNWARD
)
937 cur_cfa
->offset
+= delta
;
941 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
942 data within the trace related to EH insns and args_size. */
945 notice_eh_throw (rtx_insn
*insn
)
947 HOST_WIDE_INT args_size
;
949 args_size
= cur_trace
->end_true_args_size
;
950 if (cur_trace
->eh_head
== NULL
)
952 cur_trace
->eh_head
= insn
;
953 cur_trace
->beg_delay_args_size
= args_size
;
954 cur_trace
->end_delay_args_size
= args_size
;
956 else if (cur_trace
->end_delay_args_size
!= args_size
)
958 cur_trace
->end_delay_args_size
= args_size
;
960 /* ??? If the CFA is the stack pointer, search backward for the last
961 CFI note and insert there. Given that the stack changed for the
962 args_size change, there *must* be such a note in between here and
964 add_cfi_args_size (args_size
);
968 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
969 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
970 used in places where rtl is prohibited. */
972 static inline unsigned
973 dwf_regno (const_rtx reg
)
975 gcc_assert (REGNO (reg
) < FIRST_PSEUDO_REGISTER
);
976 return DWARF_FRAME_REGNUM (REGNO (reg
));
979 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
982 compare_reg_or_pc (rtx x
, rtx y
)
984 if (REG_P (x
) && REG_P (y
))
985 return REGNO (x
) == REGNO (y
);
989 /* Record SRC as being saved in DEST. DEST may be null to delete an
990 existing entry. SRC may be a register or PC_RTX. */
993 record_reg_saved_in_reg (rtx dest
, rtx src
)
995 reg_saved_in_data
*elt
;
998 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, elt
)
999 if (compare_reg_or_pc (elt
->orig_reg
, src
))
1002 cur_trace
->regs_saved_in_regs
.unordered_remove (i
);
1004 elt
->saved_in_reg
= dest
;
1011 reg_saved_in_data e
= {src
, dest
};
1012 cur_trace
->regs_saved_in_regs
.safe_push (e
);
1015 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1016 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1019 queue_reg_save (rtx reg
, rtx sreg
, HOST_WIDE_INT offset
)
1022 queued_reg_save e
= {reg
, sreg
, offset
};
1025 /* Duplicates waste space, but it's also necessary to remove them
1026 for correctness, since the queue gets output in reverse order. */
1027 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1028 if (compare_reg_or_pc (q
->reg
, reg
))
1034 queued_reg_saves
.safe_push (e
);
1037 /* Output all the entries in QUEUED_REG_SAVES. */
1040 dwarf2out_flush_queued_reg_saves (void)
1045 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1047 unsigned int reg
, sreg
;
1049 record_reg_saved_in_reg (q
->saved_reg
, q
->reg
);
1051 if (q
->reg
== pc_rtx
)
1052 reg
= DWARF_FRAME_RETURN_COLUMN
;
1054 reg
= dwf_regno (q
->reg
);
1056 sreg
= dwf_regno (q
->saved_reg
);
1058 sreg
= INVALID_REGNUM
;
1059 reg_save (reg
, sreg
, q
->cfa_offset
);
queued_reg_saves.truncate (0);
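
/* Illustrative timeline (hedged): a prologue that saves r12 and r13 queues
   two entries via queue_reg_save; nothing is emitted until the end of the
   prologue, or until a clobber forces a flush, at which point the loop
   above emits both save opcodes back to back so they can share a single
   advance_loc.  */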
1065 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1066 location for? Or, does it clobber a register which we've previously
1067 said that some other register is saved in, and for which we now
1068 have a new location for? */
1071 clobbers_queued_reg_save (const_rtx insn
)
1076 FOR_EACH_VEC_ELT (queued_reg_saves
, iq
, q
)
1079 reg_saved_in_data
*rir
;
1081 if (modified_in_p (q
->reg
, insn
))
1084 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, ir
, rir
)
1085 if (compare_reg_or_pc (q
->reg
, rir
->orig_reg
)
1086 && modified_in_p (rir
->saved_in_reg
, insn
))
1093 /* What register, if any, is currently saved in REG? */
1096 reg_saved_in (rtx reg
)
1098 unsigned int regn
= REGNO (reg
);
1100 reg_saved_in_data
*rir
;
1103 FOR_EACH_VEC_ELT (queued_reg_saves
, i
, q
)
1104 if (q
->saved_reg
&& regn
== REGNO (q
->saved_reg
))
1107 FOR_EACH_VEC_ELT (cur_trace
->regs_saved_in_regs
, i
, rir
)
1108 if (regn
== REGNO (rir
->saved_in_reg
))
1109 return rir
->orig_reg
;
1114 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1117 dwarf2out_frame_debug_def_cfa (rtx pat
)
1119 memset (cur_cfa
, 0, sizeof (*cur_cfa
));
1121 if (GET_CODE (pat
) == PLUS
)
1123 cur_cfa
->offset
= INTVAL (XEXP (pat
, 1));
1124 pat
= XEXP (pat
, 0);
1128 cur_cfa
->indirect
= 1;
1129 pat
= XEXP (pat
, 0);
1130 if (GET_CODE (pat
) == PLUS
)
1132 cur_cfa
->base_offset
= INTVAL (XEXP (pat
, 1));
1133 pat
= XEXP (pat
, 0);
1136 /* ??? If this fails, we could be calling into the _loc functions to
1137 define a full expression. So far no port does that. */
1138 gcc_assert (REG_P (pat
));
cur_cfa->reg = dwf_regno (pat);
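
/* Illustrative backend usage (a hedged sketch, not code from this file):
   a prologue insn that establishes the CFA as hard FP + 16 can bypass the
   default pattern matching by attaching

     add_reg_note (insn, REG_CFA_DEF_CFA,
                   plus_constant (Pmode, hard_frame_pointer_rtx, 16));
     RTX_FRAME_RELATED_P (insn) = 1;

   which dwarf2out_frame_debug_def_cfa above parses as a
   (plus (reg) (const_int)).  */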
1142 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1145 dwarf2out_frame_debug_adjust_cfa (rtx pat
)
1149 gcc_assert (GET_CODE (pat
) == SET
);
1150 dest
= XEXP (pat
, 0);
1151 src
= XEXP (pat
, 1);
1153 switch (GET_CODE (src
))
1156 gcc_assert (dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
);
1157 cur_cfa
->offset
-= INTVAL (XEXP (src
, 1));
1167 cur_cfa
->reg
= dwf_regno (dest
);
1168 gcc_assert (cur_cfa
->indirect
== 0);
1171 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1174 dwarf2out_frame_debug_cfa_offset (rtx set
)
1176 HOST_WIDE_INT offset
;
1177 rtx src
, addr
, span
;
1178 unsigned int sregno
;
1180 src
= XEXP (set
, 1);
1181 addr
= XEXP (set
, 0);
1182 gcc_assert (MEM_P (addr
));
1183 addr
= XEXP (addr
, 0);
1185 /* As documented, only consider extremely simple addresses. */
1186 switch (GET_CODE (addr
))
1189 gcc_assert (dwf_regno (addr
) == cur_cfa
->reg
);
1190 offset
= -cur_cfa
->offset
;
1193 gcc_assert (dwf_regno (XEXP (addr
, 0)) == cur_cfa
->reg
);
1194 offset
= INTVAL (XEXP (addr
, 1)) - cur_cfa
->offset
;
1203 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1207 span
= targetm
.dwarf_register_span (src
);
1208 sregno
= dwf_regno (src
);
1211 /* ??? We'd like to use queue_reg_save, but we need to come up with
1212 a different flushing heuristic for epilogues. */
1214 reg_save (sregno
, INVALID_REGNUM
, offset
);
1217 /* We have a PARALLEL describing where the contents of SRC live.
1218 Adjust the offset for each piece of the PARALLEL. */
1219 HOST_WIDE_INT span_offset
= offset
;
1221 gcc_assert (GET_CODE (span
) == PARALLEL
);
1223 const int par_len
= XVECLEN (span
, 0);
1224 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1226 rtx elem
= XVECEXP (span
, 0, par_index
);
1227 sregno
= dwf_regno (src
);
1228 reg_save (sregno
, INVALID_REGNUM
, span_offset
);
1229 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1234 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1237 dwarf2out_frame_debug_cfa_register (rtx set
)
1240 unsigned sregno
, dregno
;
1242 src
= XEXP (set
, 1);
1243 dest
= XEXP (set
, 0);
1245 record_reg_saved_in_reg (dest
, src
);
1247 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1249 sregno
= dwf_regno (src
);
1251 dregno
= dwf_regno (dest
);
1253 /* ??? We'd like to use queue_reg_save, but we need to come up with
1254 a different flushing heuristic for epilogues. */
1255 reg_save (sregno
, dregno
, 0);
1258 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1261 dwarf2out_frame_debug_cfa_expression (rtx set
)
1263 rtx src
, dest
, span
;
1264 dw_cfi_ref cfi
= new_cfi ();
1267 dest
= SET_DEST (set
);
1268 src
= SET_SRC (set
);
1270 gcc_assert (REG_P (src
));
1271 gcc_assert (MEM_P (dest
));
1273 span
= targetm
.dwarf_register_span (src
);
1276 regno
= dwf_regno (src
);
1278 cfi
->dw_cfi_opc
= DW_CFA_expression
;
1279 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= regno
;
1280 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1281 = mem_loc_descriptor (XEXP (dest
, 0), get_address_mode (dest
),
1282 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1284 /* ??? We'd like to use queue_reg_save, were the interface different,
1285 and, as above, we could manage flushing for epilogues. */
1287 update_row_reg_save (cur_row
, regno
, cfi
);
1290 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1293 dwarf2out_frame_debug_cfa_restore (rtx reg
)
1295 gcc_assert (REG_P (reg
));
1297 rtx span
= targetm
.dwarf_register_span (reg
);
1300 unsigned int regno
= dwf_regno (reg
);
1301 add_cfi_restore (regno
);
1302 update_row_reg_save (cur_row
, regno
, NULL
);
1306 /* We have a PARALLEL describing where the contents of REG live.
1307 Restore the register for each piece of the PARALLEL. */
1308 gcc_assert (GET_CODE (span
) == PARALLEL
);
1310 const int par_len
= XVECLEN (span
, 0);
1311 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1313 reg
= XVECEXP (span
, 0, par_index
);
1314 gcc_assert (REG_P (reg
));
1315 unsigned int regno
= dwf_regno (reg
);
1316 add_cfi_restore (regno
);
1317 update_row_reg_save (cur_row
, regno
, NULL
);
1322 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1323 ??? Perhaps we should note in the CIE where windows are saved (instead of
1324 assuming 0(cfa)) and what registers are in the window. */
1327 dwarf2out_frame_debug_cfa_window_save (void)
1329 dw_cfi_ref cfi
= new_cfi ();
1331 cfi
->dw_cfi_opc
= DW_CFA_GNU_window_save
;
1335 /* Record call frame debugging information for an expression EXPR,
1336 which either sets SP or FP (adjusting how we calculate the frame
1337 address) or saves a register to the stack or another register.
1338 LABEL indicates the address of EXPR.
1340 This function encodes a state machine mapping rtxes to actions on
1341 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1342 users need not read the source code.
1344 The High-Level Picture
1346 Changes in the register we use to calculate the CFA: Currently we
1347 assume that if you copy the CFA register into another register, we
1348 should take the other one as the new CFA register; this seems to
1349 work pretty well. If it's wrong for some target, it's simple
1350 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1352 Changes in the register we use for saving registers to the stack:
1353 This is usually SP, but not always. Again, we deduce that if you
1354 copy SP into another register (and SP is not the CFA register),
1355 then the new register is the one we will be using for register
1356 saves. This also seems to work.
1358 Register saves: There's not much guesswork about this one; if
1359 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1360 register save, and the register used to calculate the destination
1361 had better be the one we think we're using for this purpose.
1362 It's also assumed that a copy from a call-saved register to another
1363 register is saving that register if RTX_FRAME_RELATED_P is set on
1364 that instruction. If the copy is from a call-saved register to
1365 the *same* register, that means that the register is now the same
1366 value as in the caller.
1368 Except: If the register being saved is the CFA register, and the
1369 offset is nonzero, we are saving the CFA, so we assume we have to
1370 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1371 the intent is to save the value of SP from the previous frame.
1373 In addition, if a register has previously been saved to a different
1376 Invariants / Summaries of Rules
1378 cfa current rule for calculating the CFA. It usually
1379 consists of a register and an offset. This is
1380 actually stored in *cur_cfa, but abbreviated
1381 for the purposes of this documentation.
1382 cfa_store register used by prologue code to save things to the stack
1383 cfa_store.offset is the offset from the value of
1384 cfa_store.reg to the actual CFA
1385 cfa_temp register holding an integral value. cfa_temp.offset
1386 stores the value, which will be used to adjust the
1387 stack pointer. cfa_temp is also used like cfa_store,
1388 to track stores to the stack via fp or a temp reg.
1390 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1391 with cfa.reg as the first operand changes the cfa.reg and its
1392 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1395 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1396 expression yielding a constant. This sets cfa_temp.reg
1397 and cfa_temp.offset.
1399 Rule 5: Create a new register cfa_store used to save items to the
1402 Rules 10-14: Save a register to the stack. Define offset as the
1403 difference of the original location and cfa_store's
1404 location (or cfa_temp's location if cfa_temp is used).
1406 Rules 16-20: If AND operation happens on sp in prologue, we assume
1407 stack is realigned. We will use a group of DW_OP_XXX
1408 expressions to represent the location of the stored
1409 register instead of CFA+offset.
1413 "{a,b}" indicates a choice of a xor b.
1414 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1417 (set <reg1> <reg2>:cfa.reg)
1418 effects: cfa.reg = <reg1>
1419 cfa.offset unchanged
1420 cfa_temp.reg = <reg1>
1421 cfa_temp.offset = cfa.offset
1424 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1425 {<const_int>,<reg>:cfa_temp.reg}))
1426 effects: cfa.reg = sp if fp used
1427 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1428 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1429 if cfa_store.reg==sp
1432 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1433 effects: cfa.reg = fp
1434 cfa_offset += +/- <const_int>
1437 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1438 constraints: <reg1> != fp
1440 effects: cfa.reg = <reg1>
1441 cfa_temp.reg = <reg1>
1442 cfa_temp.offset = cfa.offset
1445 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1446 constraints: <reg1> != fp
1448 effects: cfa_store.reg = <reg1>
1449 cfa_store.offset = cfa.offset - cfa_temp.offset
1452 (set <reg> <const_int>)
1453 effects: cfa_temp.reg = <reg>
1454 cfa_temp.offset = <const_int>
1457 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1458 effects: cfa_temp.reg = <reg1>
1459 cfa_temp.offset |= <const_int>
1462 (set <reg> (high <exp>))
1466 (set <reg> (lo_sum <exp> <const_int>))
1467 effects: cfa_temp.reg = <reg>
1468 cfa_temp.offset = <const_int>
1471 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1472 effects: cfa_store.offset -= <const_int>
1473 cfa.offset = cfa_store.offset if cfa.reg == sp
1475 cfa.base_offset = -cfa_store.offset
1478 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1479 effects: cfa_store.offset += -/+ mode_size(mem)
1480 cfa.offset = cfa_store.offset if cfa.reg == sp
1482 cfa.base_offset = -cfa_store.offset
1485 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1488 effects: cfa.reg = <reg1>
1489 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1492 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1493 effects: cfa.reg = <reg1>
1494 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1497 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1498 effects: cfa.reg = <reg1>
1499 cfa.base_offset = -cfa_temp.offset
1500 cfa_temp.offset -= mode_size(mem)
1503 (set <reg> {unspec, unspec_volatile})
1504 effects: target-dependent
1507 (set sp (and: sp <const_int>))
1508 constraints: cfa_store.reg == sp
1509 effects: cfun->fde.stack_realign = 1
1510 cfa_store.offset = 0
1511 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1514 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1515 effects: cfa_store.offset += -/+ mode_size(mem)
1518 (set (mem ({pre_inc, pre_dec} sp)) fp)
1519 constraints: fde->stack_realign == 1
1520 effects: cfa_store.offset = 0
1521 cfa.reg != HARD_FRAME_POINTER_REGNUM
1524 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1525 constraints: fde->stack_realign == 1
1527 && cfa.indirect == 0
1528 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1529 effects: Use DW_CFA_def_cfa_expression to define cfa
1530 cfa.reg == fde->drap_reg */
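
/* Worked example (illustrative, not part of the original rule list): on a
   STACK_GROWS_DOWNWARD target, a frame-related prologue insn

     (set (reg sp) (plus (reg sp) (const_int -32)))

   matches Rule 2: with cfa.reg == sp the negated constant is added, so
   cfa.offset grows by 32, and since cfa_store.reg == sp, cfa_store.offset
   also grows by 32, letting the subsequent register saves at
   (mem (plus sp N)) resolve to the correct CFA-relative slots.  */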
1533 dwarf2out_frame_debug_expr (rtx expr
)
1535 rtx src
, dest
, span
;
1536 HOST_WIDE_INT offset
;
1539 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1540 the PARALLEL independently. The first element is always processed if
1541 it is a SET. This is for backward compatibility. Other elements
1542 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1543 flag is set in them. */
1544 if (GET_CODE (expr
) == PARALLEL
|| GET_CODE (expr
) == SEQUENCE
)
1547 int limit
= XVECLEN (expr
, 0);
1550 /* PARALLELs have strict read-modify-write semantics, so we
1551 ought to evaluate every rvalue before changing any lvalue.
1552 It's cumbersome to do that in general, but there's an
1553 easy approximation that is enough for all current users:
1554 handle register saves before register assignments. */
1555 if (GET_CODE (expr
) == PARALLEL
)
1556 for (par_index
= 0; par_index
< limit
; par_index
++)
1558 elem
= XVECEXP (expr
, 0, par_index
);
1559 if (GET_CODE (elem
) == SET
1560 && MEM_P (SET_DEST (elem
))
1561 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1562 dwarf2out_frame_debug_expr (elem
);
1565 for (par_index
= 0; par_index
< limit
; par_index
++)
1567 elem
= XVECEXP (expr
, 0, par_index
);
1568 if (GET_CODE (elem
) == SET
1569 && (!MEM_P (SET_DEST (elem
)) || GET_CODE (expr
) == SEQUENCE
)
1570 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1571 dwarf2out_frame_debug_expr (elem
);
1576 gcc_assert (GET_CODE (expr
) == SET
);
1578 src
= SET_SRC (expr
);
1579 dest
= SET_DEST (expr
);
1583 rtx rsi
= reg_saved_in (src
);
1590 switch (GET_CODE (dest
))
1593 switch (GET_CODE (src
))
1595 /* Setting FP from SP. */
1597 if (cur_cfa
->reg
== dwf_regno (src
))
1600 /* Update the CFA rule wrt SP or FP. Make sure src is
1601 relative to the current CFA register.
1603 We used to require that dest be either SP or FP, but the
1604 ARM copies SP to a temporary register, and from there to
1605 FP. So we just rely on the backends to only set
1606 RTX_FRAME_RELATED_P on appropriate insns. */
1607 cur_cfa
->reg
= dwf_regno (dest
);
1608 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1609 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1613 /* Saving a register in a register. */
1614 gcc_assert (!fixed_regs
[REGNO (dest
)]
1615 /* For the SPARC and its register window. */
1616 || (dwf_regno (src
) == DWARF_FRAME_RETURN_COLUMN
));
1618 /* After stack is aligned, we can only save SP in FP
1619 if drap register is used. In this case, we have
1620 to restore stack pointer with the CFA value and we
1621 don't generate this DWARF information. */
1623 && fde
->stack_realign
1624 && REGNO (src
) == STACK_POINTER_REGNUM
)
1625 gcc_assert (REGNO (dest
) == HARD_FRAME_POINTER_REGNUM
1626 && fde
->drap_reg
!= INVALID_REGNUM
1627 && cur_cfa
->reg
!= dwf_regno (src
));
1629 queue_reg_save (src
, dest
, 0);
1636 if (dest
== stack_pointer_rtx
)
1640 switch (GET_CODE (XEXP (src
, 1)))
1643 offset
= INTVAL (XEXP (src
, 1));
1646 gcc_assert (dwf_regno (XEXP (src
, 1))
1647 == cur_trace
->cfa_temp
.reg
);
1648 offset
= cur_trace
->cfa_temp
.offset
;
1654 if (XEXP (src
, 0) == hard_frame_pointer_rtx
)
1656 /* Restoring SP from FP in the epilogue. */
1657 gcc_assert (cur_cfa
->reg
== dw_frame_pointer_regnum
);
1658 cur_cfa
->reg
= dw_stack_pointer_regnum
;
1660 else if (GET_CODE (src
) == LO_SUM
)
1661 /* Assume we've set the source reg of the LO_SUM from sp. */
1664 gcc_assert (XEXP (src
, 0) == stack_pointer_rtx
);
1666 if (GET_CODE (src
) != MINUS
)
1668 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1669 cur_cfa
->offset
+= offset
;
1670 if (cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
)
1671 cur_trace
->cfa_store
.offset
+= offset
;
1673 else if (dest
== hard_frame_pointer_rtx
)
1676 /* Either setting the FP from an offset of the SP,
1677 or adjusting the FP */
1678 gcc_assert (frame_pointer_needed
);
1680 gcc_assert (REG_P (XEXP (src
, 0))
1681 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1682 && CONST_INT_P (XEXP (src
, 1)));
1683 offset
= INTVAL (XEXP (src
, 1));
1684 if (GET_CODE (src
) != MINUS
)
1686 cur_cfa
->offset
+= offset
;
1687 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1691 gcc_assert (GET_CODE (src
) != MINUS
);
1694 if (REG_P (XEXP (src
, 0))
1695 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1696 && CONST_INT_P (XEXP (src
, 1)))
1698 /* Setting a temporary CFA register that will be copied
1699 into the FP later on. */
1700 offset
= - INTVAL (XEXP (src
, 1));
1701 cur_cfa
->offset
+= offset
;
1702 cur_cfa
->reg
= dwf_regno (dest
);
1703 /* Or used to save regs to the stack. */
1704 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1705 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1709 else if (REG_P (XEXP (src
, 0))
1710 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1711 && XEXP (src
, 1) == stack_pointer_rtx
)
1713 /* Setting a scratch register that we will use instead
1714 of SP for saving registers to the stack. */
1715 gcc_assert (cur_cfa
->reg
== dw_stack_pointer_regnum
);
1716 cur_trace
->cfa_store
.reg
= dwf_regno (dest
);
1717 cur_trace
->cfa_store
.offset
1718 = cur_cfa
->offset
- cur_trace
->cfa_temp
.offset
;
1722 else if (GET_CODE (src
) == LO_SUM
1723 && CONST_INT_P (XEXP (src
, 1)))
1725 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1726 cur_trace
->cfa_temp
.offset
= INTVAL (XEXP (src
, 1));
1735 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1736 cur_trace
->cfa_temp
.offset
= INTVAL (src
);
1741 gcc_assert (REG_P (XEXP (src
, 0))
1742 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1743 && CONST_INT_P (XEXP (src
, 1)));
1745 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1746 cur_trace
->cfa_temp
.offset
|= INTVAL (XEXP (src
, 1));
1749 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1750 which will fill in all of the bits. */
1757 case UNSPEC_VOLATILE
:
1758 /* All unspecs should be represented by REG_CFA_* notes. */
1764 /* If this AND operation happens on stack pointer in prologue,
1765 we assume the stack is realigned and we extract the
1767 if (fde
&& XEXP (src
, 0) == stack_pointer_rtx
)
1769 /* We interpret reg_save differently with stack_realign set.
1770 Thus we must flush whatever we have queued first. */
1771 dwarf2out_flush_queued_reg_saves ();
1773 gcc_assert (cur_trace
->cfa_store
.reg
1774 == dwf_regno (XEXP (src
, 0)));
1775 fde
->stack_realign
= 1;
1776 fde
->stack_realignment
= INTVAL (XEXP (src
, 1));
1777 cur_trace
->cfa_store
.offset
= 0;
1779 if (cur_cfa
->reg
!= dw_stack_pointer_regnum
1780 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1781 fde
->drap_reg
= cur_cfa
->reg
;
1792 /* Saving a register to the stack. Make sure dest is relative to the
1794 switch (GET_CODE (XEXP (dest
, 0)))
1800 /* We can't handle variable size modifications. */
1801 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest
, 0), 1), 1))
1803 offset
= -INTVAL (XEXP (XEXP (XEXP (dest
, 0), 1), 1));
1805 gcc_assert (REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
1806 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1808 cur_trace
->cfa_store
.offset
+= offset
;
1809 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1810 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1812 if (GET_CODE (XEXP (dest
, 0)) == POST_MODIFY
)
1813 offset
-= cur_trace
->cfa_store
.offset
;
1815 offset
= -cur_trace
->cfa_store
.offset
;
1822 offset
= GET_MODE_SIZE (GET_MODE (dest
));
1823 if (GET_CODE (XEXP (dest
, 0)) == PRE_INC
)
1826 gcc_assert ((REGNO (XEXP (XEXP (dest
, 0), 0))
1827 == STACK_POINTER_REGNUM
)
1828 && cur_trace
->cfa_store
.reg
== dw_stack_pointer_regnum
);
1830 cur_trace
->cfa_store
.offset
+= offset
;
1832 /* Rule 18: If stack is aligned, we will use FP as a
1833 reference to represent the address of the stored
1836 && fde
->stack_realign
1838 && REGNO (src
) == HARD_FRAME_POINTER_REGNUM
)
1840 gcc_assert (cur_cfa
->reg
!= dw_frame_pointer_regnum
);
1841 cur_trace
->cfa_store
.offset
= 0;
1844 if (cur_cfa
->reg
== dw_stack_pointer_regnum
)
1845 cur_cfa
->offset
= cur_trace
->cfa_store
.offset
;
1847 if (GET_CODE (XEXP (dest
, 0)) == POST_DEC
)
1848 offset
+= -cur_trace
->cfa_store
.offset
;
1850 offset
= -cur_trace
->cfa_store
.offset
;
1854 /* With an offset. */
1861 gcc_assert (CONST_INT_P (XEXP (XEXP (dest
, 0), 1))
1862 && REG_P (XEXP (XEXP (dest
, 0), 0)));
1863 offset
= INTVAL (XEXP (XEXP (dest
, 0), 1));
1864 if (GET_CODE (XEXP (dest
, 0)) == MINUS
)
1867 regno
= dwf_regno (XEXP (XEXP (dest
, 0), 0));
1869 if (cur_cfa
->reg
== regno
)
1870 offset
-= cur_cfa
->offset
;
1871 else if (cur_trace
->cfa_store
.reg
== regno
)
1872 offset
-= cur_trace
->cfa_store
.offset
;
1875 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1876 offset
-= cur_trace
->cfa_temp
.offset
;
1882 /* Without an offset. */
1885 unsigned int regno
= dwf_regno (XEXP (dest
, 0));
1887 if (cur_cfa
->reg
== regno
)
1888 offset
= -cur_cfa
->offset
;
1889 else if (cur_trace
->cfa_store
.reg
== regno
)
1890 offset
= -cur_trace
->cfa_store
.offset
;
1893 gcc_assert (cur_trace
->cfa_temp
.reg
== regno
);
1894 offset
= -cur_trace
->cfa_temp
.offset
;
1901 gcc_assert (cur_trace
->cfa_temp
.reg
1902 == dwf_regno (XEXP (XEXP (dest
, 0), 0)));
1903 offset
= -cur_trace
->cfa_temp
.offset
;
1904 cur_trace
->cfa_temp
.offset
-= GET_MODE_SIZE (GET_MODE (dest
));
1912 /* If the source operand of this MEM operation is a memory,
1913 we only care how much stack grew. */
1918 && REGNO (src
) != STACK_POINTER_REGNUM
1919 && REGNO (src
) != HARD_FRAME_POINTER_REGNUM
1920 && dwf_regno (src
) == cur_cfa
->reg
)
1922 /* We're storing the current CFA reg into the stack. */
1924 if (cur_cfa
->offset
== 0)
1927 /* If stack is aligned, putting CFA reg into stack means
1928 we can no longer use reg + offset to represent CFA.
1929 Here we use DW_CFA_def_cfa_expression instead. The
1930 result of this expression equals to the original CFA
1933 && fde
->stack_realign
1934 && cur_cfa
->indirect
== 0
1935 && cur_cfa
->reg
!= dw_frame_pointer_regnum
)
1937 gcc_assert (fde
->drap_reg
== cur_cfa
->reg
);
1939 cur_cfa
->indirect
= 1;
1940 cur_cfa
->reg
= dw_frame_pointer_regnum
;
1941 cur_cfa
->base_offset
= offset
;
1942 cur_cfa
->offset
= 0;
1944 fde
->drap_reg_saved
= 1;
1948 /* If the source register is exactly the CFA, assume
1949 we're saving SP like any other register; this happens
1951 queue_reg_save (stack_pointer_rtx
, NULL_RTX
, offset
);
1956 /* Otherwise, we'll need to look in the stack to
1957 calculate the CFA. */
1958 rtx x
= XEXP (dest
, 0);
1962 gcc_assert (REG_P (x
));
1964 cur_cfa
->reg
= dwf_regno (x
);
1965 cur_cfa
->base_offset
= offset
;
1966 cur_cfa
->indirect
= 1;
1972 span
= targetm
.dwarf_register_span (src
);
1977 queue_reg_save (src
, NULL_RTX
, offset
);
1980 /* We have a PARALLEL describing where the contents of SRC live.
1981 Queue register saves for each piece of the PARALLEL. */
1982 HOST_WIDE_INT span_offset
= offset
;
1984 gcc_assert (GET_CODE (span
) == PARALLEL
);
1986 const int par_len
= XVECLEN (span
, 0);
1987 for (int par_index
= 0; par_index
< par_len
; par_index
++)
1989 rtx elem
= XVECEXP (span
, 0, par_index
);
1990 queue_reg_save (elem
, NULL_RTX
, span_offset
);
1991 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
2001 /* Record call frame debugging information for INSN, which either sets
2002 SP or FP (adjusting how we calculate the frame address) or saves a
2003 register to the stack. */
2006 dwarf2out_frame_debug (rtx_insn
*insn
)
2009 bool handled_one
= false;
2011 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
2012 switch (REG_NOTE_KIND (note
))
2014 case REG_FRAME_RELATED_EXPR
:
2015 pat
= XEXP (note
, 0);
2018 case REG_CFA_DEF_CFA
:
2019 dwarf2out_frame_debug_def_cfa (XEXP (note
, 0));
2023 case REG_CFA_ADJUST_CFA
:
2028 if (GET_CODE (n
) == PARALLEL
)
2029 n
= XVECEXP (n
, 0, 0);
2031 dwarf2out_frame_debug_adjust_cfa (n
);
2035 case REG_CFA_OFFSET
:
2038 n
= single_set (insn
);
2039 dwarf2out_frame_debug_cfa_offset (n
);
2043 case REG_CFA_REGISTER
:
2048 if (GET_CODE (n
) == PARALLEL
)
2049 n
= XVECEXP (n
, 0, 0);
2051 dwarf2out_frame_debug_cfa_register (n
);
2055 case REG_CFA_EXPRESSION
:
2058 n
= single_set (insn
);
2059 dwarf2out_frame_debug_cfa_expression (n
);
2063 case REG_CFA_RESTORE
:
2068 if (GET_CODE (n
) == PARALLEL
)
2069 n
= XVECEXP (n
, 0, 0);
2072 dwarf2out_frame_debug_cfa_restore (n
);
2076 case REG_CFA_SET_VDRAP
:
2080 dw_fde_ref fde
= cfun
->fde
;
2083 gcc_assert (fde
->vdrap_reg
== INVALID_REGNUM
);
2085 fde
->vdrap_reg
= dwf_regno (n
);
2091 case REG_CFA_WINDOW_SAVE
:
2092 dwarf2out_frame_debug_cfa_window_save ();
2096 case REG_CFA_FLUSH_QUEUE
:
2097 /* The actual flush happens elsewhere. */
2107 pat
= PATTERN (insn
);
2109 dwarf2out_frame_debug_expr (pat
);
2111 /* Check again. A parallel can save and update the same register.
2112 We could probably check just once, here, but this is safer than
2113 removing the check at the start of the function. */
2114 if (clobbers_queued_reg_save (pat
))
2115 dwarf2out_flush_queued_reg_saves ();
2119 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2122 change_cfi_row (dw_cfi_row
*old_row
, dw_cfi_row
*new_row
)
2124 size_t i
, n_old
, n_new
, n_max
;
2127 if (new_row
->cfa_cfi
&& !cfi_equal_p (old_row
->cfa_cfi
, new_row
->cfa_cfi
))
2128 add_cfi (new_row
->cfa_cfi
);
2131 cfi
= def_cfa_0 (&old_row
->cfa
, &new_row
->cfa
);
2136 n_old
= vec_safe_length (old_row
->reg_save
);
2137 n_new
= vec_safe_length (new_row
->reg_save
);
2138 n_max
= MAX (n_old
, n_new
);
2140 for (i
= 0; i
< n_max
; ++i
)
2142 dw_cfi_ref r_old
= NULL
, r_new
= NULL
;
2145 r_old
= (*old_row
->reg_save
)[i
];
2147 r_new
= (*new_row
->reg_save
)[i
];
2151 else if (r_new
== NULL
)
2152 add_cfi_restore (i
);
2153 else if (!cfi_equal_p (r_old
, r_new
))
2158 /* Examine CFI and return true if a cfi label and set_loc is needed
2159 beforehand. Even when generating CFI assembler instructions, we
2160 still have to add the cfi to the list so that lookup_cfa_1 works
2161 later on. When -g2 and above we even need to force emitting of
2162 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2163 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2164 and so don't use convert_cfa_to_fb_loc_list. */
2167 cfi_label_required_p (dw_cfi_ref cfi
)
2169 if (!dwarf2out_do_cfi_asm ())
2172 if (dwarf_version
== 2
2173 && debug_info_level
> DINFO_LEVEL_TERSE
2174 && (write_symbols
== DWARF2_DEBUG
2175 || write_symbols
== VMS_AND_DWARF2_DEBUG
))
2177 switch (cfi
->dw_cfi_opc
)
2179 case DW_CFA_def_cfa_offset
:
2180 case DW_CFA_def_cfa_offset_sf
:
2181 case DW_CFA_def_cfa_register
:
2182 case DW_CFA_def_cfa
:
2183 case DW_CFA_def_cfa_sf
:
2184 case DW_CFA_def_cfa_expression
:
2185 case DW_CFA_restore_state
:
2194 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2195 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2198 add_cfis_to_fde (void)
2200 dw_fde_ref fde
= cfun
->fde
;
2201 rtx_insn
*insn
, *next
;
2202 /* We always start with a function_begin label. */
2205 for (insn
= get_insns (); insn
; insn
= next
)
2207 next
= NEXT_INSN (insn
);
2209 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2211 fde
->dw_fde_switch_cfi_index
= vec_safe_length (fde
->dw_fde_cfi
);
2212 /* Don't attempt to advance_loc4 between labels
2213 in different sections. */
2217 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2219 bool required
= cfi_label_required_p (NOTE_CFI (insn
));
2221 if (NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_CFI
)
2223 required
|= cfi_label_required_p (NOTE_CFI (next
));
2224 next
= NEXT_INSN (next
);
2226 else if (active_insn_p (next
)
2227 || (NOTE_P (next
) && (NOTE_KIND (next
)
2228 == NOTE_INSN_SWITCH_TEXT_SECTIONS
)))
2231 next
= NEXT_INSN (next
);
2234 int num
= dwarf2out_cfi_label_num
;
2235 const char *label
= dwarf2out_cfi_label ();
2238 /* Set the location counter to the new label. */
2240 xcfi
->dw_cfi_opc
= (first
? DW_CFA_set_loc
2241 : DW_CFA_advance_loc4
);
2242 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= label
;
2243 vec_safe_push (fde
->dw_fde_cfi
, xcfi
);
2245 rtx_note
*tmp
= emit_note_before (NOTE_INSN_CFI_LABEL
, insn
);
2246 NOTE_LABEL_NUMBER (tmp
) = num
;
2251 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2252 vec_safe_push (fde
->dw_fde_cfi
, NOTE_CFI (insn
));
2253 insn
= NEXT_INSN (insn
);
2255 while (insn
!= next
);
2261 /* If LABEL is the start of a trace, then initialize the state of that
2262 trace from CUR_TRACE and CUR_ROW. */
2265 maybe_record_trace_start (rtx_insn
*start
, rtx_insn
*origin
)
2268 HOST_WIDE_INT args_size
;
2270 ti
= get_trace_info (start
);
2271 gcc_assert (ti
!= NULL
);
2275 fprintf (dump_file
, " saw edge from trace %u to %u (via %s %d)\n",
2276 cur_trace
->id
, ti
->id
,
2277 (origin
? rtx_name
[(int) GET_CODE (origin
)] : "fallthru"),
2278 (origin
? INSN_UID (origin
) : 0));
2281 args_size
= cur_trace
->end_true_args_size
;
2282 if (ti
->beg_row
== NULL
)
2284 /* This is the first time we've encountered this trace. Propagate
2285 state across the edge and push the trace onto the work list. */
2286 ti
->beg_row
= copy_cfi_row (cur_row
);
2287 ti
->beg_true_args_size
= args_size
;
2289 ti
->cfa_store
= cur_trace
->cfa_store
;
2290 ti
->cfa_temp
= cur_trace
->cfa_temp
;
2291 ti
->regs_saved_in_regs
= cur_trace
->regs_saved_in_regs
.copy ();
2293 trace_work_list
.safe_push (ti
);
2296 fprintf (dump_file
, "\tpush trace %u to worklist\n", ti
->id
);
2301 /* We ought to have the same state incoming to a given trace no
2302 matter how we arrive at the trace. Anything else means we've
2303 got some kind of optimization error. */
2304 gcc_checking_assert (cfi_row_equal_p (cur_row
, ti
->beg_row
));
2306 /* The args_size is allowed to conflict if it isn't actually used. */
2307 if (ti
->beg_true_args_size
!= args_size
)
2308 ti
->args_size_undefined
= true;
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always positive in the
         direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
        delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
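/* A worked example of the adjustment above, using assumed numbers only:
   on a STACK_GROWS_DOWNWARD target with the CFA currently sp+32 and
   end_true_args_size == 16, the EH or non-local-goto edge is recorded
   with delta = -16, i.e. with the CFA described as sp+16.  At the landing
   pad the pushed arguments have been popped, so the stack pointer is 16
   bytes higher and the smaller offset names the same frame address.  The
   saved CFA and args_size are then restored for the fallthrough path.  */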
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
        return;

      if (tablejump_p (insn, NULL, &table))
        {
          rtvec vec = table->get_labels ();

          n = GET_NUM_ELEM (vec);
          for (i = 0; i < n; ++i)
            {
              rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
              maybe_record_trace_start (lab, insn);
            }
        }
      else if (computed_jump_p (insn))
        {
          for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
            maybe_record_trace_start (lab->insn (), insn);
        }
      else if (returnjump_p (insn))
        ;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
        {
          n = ASM_OPERANDS_LABEL_LENGTH (tmp);
          for (i = 0; i < n; ++i)
            {
              rtx_insn *lab =
                as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
              maybe_record_trace_start (lab, insn);
            }
        }
      else
        {
          rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
          gcc_assert (lab != NULL);
          maybe_record_trace_start (lab, insn);
        }
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
        return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
        for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
             lab;
             lab = lab->next ())
          maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
        create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
        maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */

static void
scan_insn_after (rtx_insn *insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
             trace->id, rtx_name[(int) GET_CODE (insn)],
             INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
        {
          /* Don't bother saving the unneeded queued registers at all.  */
          queued_reg_saves.truncate (0);
          break;
        }
      if (save_point_p (insn))
        {
          /* Propagate across fallthru edges.  */
          dwarf2out_flush_queued_reg_saves ();
          maybe_record_trace_start (insn, NULL);
          break;
        }

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
        continue;

      /* Handle all changes to the row state.  Sequences require special
         handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
        {
          rtx_insn *elt;
          int i, n = pat->len ();

          control = pat->insn (0);
          if (can_throw_internal (control))
            notice_eh_throw (control);
          dwarf2out_flush_queued_reg_saves ();

          if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
            {
              /* ??? Hopefully multiple delay slots are not annulled.  */
              gcc_assert (n == 2);
              gcc_assert (!RTX_FRAME_RELATED_P (control));
              gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

              elt = pat->insn (1);

              if (INSN_FROM_TARGET_P (elt))
                {
                  HOST_WIDE_INT restore_args_size;
                  cfi_vec save_row_reg_save;

                  /* If ELT is an instruction from target of an annulled
                     branch, the effects are for the target only and so
                     the args_size and CFA along the current path
                     shouldn't change.  */
                  add_cfi_insn = NULL;
                  restore_args_size = cur_trace->end_true_args_size;
                  cur_cfa = &cur_row->cfa;
                  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

                  scan_insn_after (elt);

                  /* ??? Should we instead save the entire row state?  */
                  gcc_assert (!queued_reg_saves.length ());

                  create_trace_edges (control);

                  cur_trace->end_true_args_size = restore_args_size;
                  cur_row->cfa = this_cfa;
                  cur_row->reg_save = save_row_reg_save;
                  cur_cfa = &this_cfa;
                }
              else
                {
                  /* If ELT is an annulled branch-taken instruction (i.e.
                     executed only when branch is not taken), the args_size
                     and CFA should not change through the jump.  */
                  create_trace_edges (control);

                  /* Update and continue with the trace.  */
                  add_cfi_insn = insn;
                  scan_insn_after (elt);
                  def_cfa_1 (&this_cfa);
                }
              continue;
            }

          /* The insns in the delay slot should all be considered to happen
             "before" a call insn.  Consider a call with a stack pointer
             adjustment in the delay slot.  The backtrace from the callee
             should include the sp adjustment.  Unfortunately, that leaves
             us with an unavoidable unwinding error exactly at the call insn
             itself.  For jump insns we'd prefer to avoid this error by
             placing the notes after the sequence.  */
          if (JUMP_P (control))
            add_cfi_insn = insn;

          for (i = 1; i < n; ++i)
            {
              elt = pat->insn (i);
              scan_insn_after (elt);
            }

          /* Make sure any register saves are visible at the jump target.  */
          dwarf2out_flush_queued_reg_saves ();
          any_cfis_emitted = false;

          /* However, if there is some adjustment on the call itself, e.g.
             a call_pop, that action should be considered to happen after
             the call returns.  */
          add_cfi_insn = insn;
          scan_insn_after (control);
        }
      else
        {
          /* Flush data before calls and jumps, and of course if necessary.  */
          if (can_throw_internal (insn))
            {
              notice_eh_throw (insn);
              dwarf2out_flush_queued_reg_saves ();
            }
          else if (!NONJUMP_INSN_P (insn)
                   || clobbers_queued_reg_save (insn)
                   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
            dwarf2out_flush_queued_reg_saves ();
          any_cfis_emitted = false;

          add_cfi_insn = insn;
          scan_insn_after (insn);
          control = insn;
        }

      /* Between frame-related-p and args_size we might have otherwise
         emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
         once anything is emitted.  */
      if (any_cfis_emitted
          || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
        dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
         same tests as are done to actually create the edges.  So
         always call the routine and let it not create edges for
         non-control-flow insns.  */
      create_trace_edges (control);
    }

  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
/* Scan the function and create the initial set of CFI notes.  */

static void
create_cfi_notes (void)
{
  dw_trace_info *ti;

  gcc_checking_assert (!queued_reg_saves.exists ());
  gcc_checking_assert (!trace_work_list.exists ());

  /* Always begin at the entry trace.  */
  ti = &trace_info[0];
  scan_trace (ti);

  while (!trace_work_list.is_empty ())
    {
      ti = trace_work_list.pop ();
      scan_trace (ti);
    }

  queued_reg_saves.release ();
  trace_work_list.release ();
}
/* Return the insn before the first NOTE_INSN_CFI after START.  */

static rtx_insn *
before_next_cfi_note (rtx_insn *start)
{
  rtx_insn *prev = start;
  while (start)
    {
      if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
        return prev;
      prev = start;
      start = NEXT_INSN (start);
    }
  gcc_unreachable ();
}
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
        {
          trace_info.ordered_remove (i);
          n -= 1;
        }
      else
        gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
         for the portion of the function in the alternate text
         section.  The row state at the very beginning of that
         new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
        old_row = cie_cfi_row;
      else
        {
          old_row = prev_ti->end_row;
          /* If there's no change from the previous end state, fine.  */
          if (cfi_row_equal_p (old_row, ti->beg_row))
            ;
          /* Otherwise check for the common case of sharing state with
             the beginning of an epilogue, but not the end.  Insert
             remember/restore opcodes in that case.  */
          else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
            {
              dw_cfi_ref cfi;

              /* Note that if we blindly insert the remember at the
                 start of the trace, we can wind up increasing the
                 size of the unwind info due to extra advance opcodes.
                 Instead, put the remember immediately before the next
                 state change.  We know there must be one, because the
                 state at the beginning and head of the trace differ.  */
              add_cfi_insn = before_next_cfi_note (prev_ti->head);
              cfi = new_cfi ();
              cfi->dw_cfi_opc = DW_CFA_remember_state;
              add_cfi (cfi);

              add_cfi_insn = ti->head;
              cfi = new_cfi ();
              cfi->dw_cfi_opc = DW_CFA_restore_state;
              add_cfi (cfi);

              old_row = prev_ti->beg_row;
            }
          /* Otherwise, we'll simply change state from the previous end.  */
        }

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
        {
          rtx_insn *note;

          fprintf (dump_file, "Fixup between trace %u and %u:\n",
                   prev_ti->id, ti->id);

          note = ti->head;
          do
            {
              note = NEXT_INSN (note);
              gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
              output_cfi_directive (dump_file, NOTE_CFI (note));
            }
          while (note != add_cfi_insn);
        }
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
        {
          ti = &trace_info[i];

          if (ti->switch_sections)
            prev_args_size = 0;
          if (ti->eh_head == NULL)
            continue;
          gcc_assert (!ti->args_size_undefined);

          if (ti->beg_delay_args_size != prev_args_size)
            {
              /* ??? Search back to previous CFI note.  */
              add_cfi_insn = PREV_INSN (ti->eh_head);
              add_cfi_args_size (ti->beg_delay_args_size);
            }

          prev_args_size = ti->end_delay_args_size;
        }
    }
}
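/* An illustrative (hypothetical) fixup produced by the epilogue case above,
   shown as the directives a dump would print.  Suppose the trace before an
   epilogue ends with the CFA on the frame pointer while the trace after the
   epilogue again expects the entry-time state:

       .cfi_remember_state      <- placed just before the next state change
                                   in the preceding trace
       ... epilogue restores ...
       .cfi_restore_state       <- placed at the head of the following trace

   The exact directives depend on the target; this only sketches how the
   remember/restore pair avoids re-describing the entire row.  */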
/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
        saw_barrier = true;
      else if (NOTE_P (insn)
               && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
        {
          /* We should have just seen a barrier.  */
          gcc_assert (saw_barrier);
          switch_sections = true;
        }
      /* Watch out for save_point notes between basic blocks.
         In particular, a note after a barrier.  Do not record these,
         delaying trace creation until the label.  */
      else if (save_point_p (insn)
               && (LABEL_P (insn) || !saw_barrier))
        {
          memset (&ti, 0, sizeof (ti));
          ti.head = insn;
          ti.switch_sections = switch_sections;
          ti.id = trace_info.length ();
          trace_info.safe_push (ti);

          saw_barrier = false;
          switch_sections = false;
        }
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
        fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
                 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
                 tp->switch_sections ? " (section switch)" : "");

      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}
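/* A small illustration of the trace partitioning, with a made-up insn
   stream.  Labels, barriers and certain notes are save points, so

       (note function_beg) insn insn (jump) (barrier) (code_label L1) insn ...

   yields trace 0 starting at the first insn of the function and trace 1
   starting at L1; a non-label save point sitting right after the barrier
   would not start a trace of its own, matching the save_point_p /
   !saw_barrier test above.  */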
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
        {
        case REG:
          gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
          break;

        case PLUS:
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
          offset = INTVAL (XEXP (rtl, 1));
          break;

        case MINUS:
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
          offset = -INTVAL (XEXP (rtl, 1));
          break;

        default:
          gcc_unreachable ();
        }
      break;

    case PLUS:
      /* The return address is at some offset from any value we can
         actually load.  For instance, on the SPARC it is in %i7+8.  Just
         ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
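/* Typical shapes of INCOMING_RETURN_ADDR_RTX handled above, for illustration
   only (target headers define the real value; the register names and offsets
   here are examples, not definitions):

       (reg lr)                              -- RA incoming in a register
       (mem (reg sp))                        -- RA at the top of the stack
       (mem (plus (reg sp) (const_int 4)))   -- RA at sp+4
       (plus (reg i7) (const_int 8))         -- SPARC-style register + offset  */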
static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
         register, but choose a different return column.  This will result
         in a DW_CFA_register for the return, and an entry in
         regs_saved_in_regs to match.  If the target later stores that
         return address register to the stack, we want to be able to emit
         the DW_CFA_offset against the return column, not the intermediate
         save register.  Save the contents of regs_saved_in_regs so that
         we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
        {
        case 0:
          break;
        case 1:
          cie_return_save = ggc_alloc<reg_saved_in_data> ();
          *cie_return_save = cie_trace.regs_saved_in_regs[0];
          cie_trace.regs_saved_in_regs.release ();
          break;
        default:
          gcc_unreachable ();
        }
    }

  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
/* Convert a DWARF call frame info operation to its string name.  */

static const char *
dwarf_cfi_name (unsigned int cfi_opc)
{
  const char *name = get_DW_CFA_name (cfi_opc);

  if (name != NULL)
    return name;

  return "DW_CFA_<unknown>";
}
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
/* Similar, but used for .cfi_escape.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
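/* For illustration only: a DW_CFA_def_cfa_expression routed through
   .cfi_escape might come out as

       .cfi_escape 0x0f,0x03,0x77,0x08,0x06

   i.e. opcode 0x0f, a 3-byte block, DW_OP_breg7 (%rsp) + 8, DW_OP_deref,
   using x86-64 register numbering.  The exact bytes are an assumed example,
   not output this file is guaranteed to produce.  */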
/* Output a Call Frame Information opcode and its operand(s).  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
                             | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
                         "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
                         ((unsigned HOST_WIDE_INT)
                          cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
                           "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
                           "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
                           "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
        {
        case DW_CFA_set_loc:
          if (for_eh)
            dw2_asm_output_encoded_addr_rtx (
                ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
                gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
                false, NULL);
          else
            dw2_asm_output_addr (DWARF2_ADDR_SIZE,
                                 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc1:
          dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc2:
          dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc4:
          dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_MIPS_advance_loc8:
          dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_offset_extended:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_uleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
          break;

        case DW_CFA_offset_extended_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_restore_extended:
        case DW_CFA_undefined:
        case DW_CFA_same_value:
        case DW_CFA_def_cfa_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_def_cfa_offset:
        case DW_CFA_GNU_args_size:
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
          break;

        case DW_CFA_def_cfa_offset_sf:
          off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_GNU_window_save:
          break;

        case DW_CFA_def_cfa_expression:
        case DW_CFA_expression:
          output_cfa_loc (cfi, for_eh);
          break;

        case DW_CFA_GNU_negative_offset_extended:
          /* Obsoleted by DW_CFA_offset_extended_sf.  */
          gcc_unreachable ();

        default:
          break;
        }
    }
}
/* Similar, but do it via assembler directives instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
         via directives.  The assembler is going to take care of this for
         us.  But this routine is also used for debugging dumps, so
         print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC "\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC "\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
               HOST_WIDE_INT_PRINT_DEC "\n",
               cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      if (f == asm_out_file)
        {
          fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
          dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          if (flag_debug_asm)
            fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
                     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
          fputc ('\n', f);
        }
      else
        fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
                 cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_def_cfa_expression ...\n");
          break;
        }
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_cfa_expression ...\n");
          break;
        }
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
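/* As a concrete, x86-64-flavoured illustration of the directives above,
   a standard "push %rbp; mov %rsp, %rbp" prologue is typically described by

       .cfi_def_cfa_offset 16
       .cfi_offset 6, -16
       .cfi_def_cfa_register 6

   where register 6 is %rbp in DWARF numbering.  The actual output depends
   on the target and on the DWARF2_FRAME_REG_OUT mapping; this is only an
   assumed example.  */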
/* Emit a CFI as an assembler directive when directives are in use.  */

void
dwarf2out_emit_cfi (dw_cfi_ref cfi)
{
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
}
/* Dump the state of ROW to F as a sequence of .cfi directives.  */

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      dw_cfa_location dummy;

      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}

void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;

/* Decide whether we want to emit frame unwind information for the current
   translation unit.  */

bool
dwarf2out_do_frame (void)
{
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return true;

  if (saved_do_cfi_asm > 0)
    return true;

  if (targetm.debug_unwind_info () == UI_DWARF2)
    return true;

  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    return true;

  return false;
}
/* Decide whether to emit frame unwind via assembler directives.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
namespace {

const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}

#include "gt-dwarf2cfi.h"