/* Dwarf2 Call Frame Information helper routines.
   Copyright (C) 1992-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "basic-block.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
#include "common/common-target.h"
#include "tree-pass.h"

#include "except.h"		/* expand_builtin_dwarf_sp_column */
#include "expr.h"		/* init_return_column_size */
#include "regs.h"		/* expand_builtin_init_dwarf_reg_sizes */
#include "output.h"		/* asm_out_file */
#include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */

/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  */
  cfi_vec reg_save;
} dw_cfi_row;

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct
{
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;
/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */
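
/* An illustrative sketch (not taken from any particular target): in a
   function of the form

	prologue insns
	conditional jump to L1
	... straight-line insns ...
     L1:
	... more insns ...
	epilogue insns

   the label L1 is a save point, so the insns before it form one trace and
   the insns from L1 onward form another.  When the conditional jump is
   scanned, the row state current at that point is propagated to the trace
   headed by L1, exactly as described above.  */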
typedef struct
{
  /* The insn that begins the trace.  */
  rtx head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;

typedef dw_trace_info *dw_trace_info_ref;
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info_ref> trace_work_list;
static htab_t trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

static GTY(()) reg_saved_in_data *cie_return_save;

static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */
typedef struct
{
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;

static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}

/* MEM is a memory reference for the register size table, each element of
   which has mode MODE.  Initialize column C as a return address column.  */

static void
init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
{
  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
  emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
}
/* Generate code to initialize the register size table.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  HOST_WIDE_INT size;
	  enum machine_mode save_mode = reg_raw_mode[i];

	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (dnum == DWARF_FRAME_RETURN_COLUMN)
	    {
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
static hashval_t
dw_trace_info_hash (const void *ptr)
{
  const dw_trace_info *ti = (const dw_trace_info *) ptr;
  return INSN_UID (ti->head);
}

static int
dw_trace_info_eq (const void *ptr_a, const void *ptr_b)
{
  const dw_trace_info *a = (const dw_trace_info *) ptr_a;
  const dw_trace_info *b = (const dw_trace_info *) ptr_b;
  return a->head == b->head;
}

static dw_trace_info *
get_trace_info (rtx insn)
{
  dw_trace_info dummy;
  dummy.head = insn;
  return (dw_trace_info *)
    htab_find_with_hash (trace_index, &dummy, INSN_UID (insn));
}
static bool
save_point_p (rtx insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
	return true;
      }

  return false;
}
/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */

static inline HOST_WIDE_INT
div_data_align (HOST_WIDE_INT off)
{
  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
  return r;
}

/* Return true if we need a signed version of a given opcode
   (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */

static bool
need_data_align_sf_opcode (HOST_WIDE_INT off)
{
  return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
}
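
/* For instance (illustrative values only): on a target where the stack
   grows downward and DWARF_CIE_DATA_ALIGNMENT is -8, a saved-register
   offset of -40 factors as div_data_align (-40) == 5, while
   need_data_align_sf_opcode (8) is true, because a positive offset with
   a negative data alignment requires the signed (_sf) opcode form.  */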
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}

/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();

  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}

/* Generate a new label for the CFI info to refer to.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[MAX_ARTIFICIAL_LABEL_BYTES];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}

static void
add_cfi_restore (unsigned reg)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  add_cfi (cfi);
}
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
{
  struct dw_loc_descr_struct *ptr;

  cfa->base_offset = 0;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	  cfa->reg = op - DW_OP_reg0;

	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;

	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;

	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;

	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	}
    }
}
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
/* Determine if two dw_cfa_location structures define the same data.  */

static bool
cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
{
  return (loc1->reg == loc2->reg
	  && loc1->offset == loc2->offset
	  && loc1->indirect == loc2->indirect
	  && (loc1->indirect == 0
	      || loc1->base_offset == loc2->base_offset));
}
/* Determine if two CFI operands are identical.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
/* Determine if two CFI entries are identical.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */
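
/* A quick illustration (values are only an example): if OLD_CFA is
   (reg 7, offset 8) and NEW_CFA is (reg 7, offset 16), only the offset
   changed, so the result is a DW_CFA_def_cfa_offset carrying offset 16.
   If only the register changed, the result is DW_CFA_def_cfa_register;
   if both changed and the new CFA is not indirect, DW_CFA_def_cfa carries
   both operands; otherwise a full DW_CFA_def_cfa_expression is built.  */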
static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}

/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi != NULL)
    {
      cur_row->cfa = *new_cfa;
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */
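
/* For example (illustrative only): on x86-64, where the CFA column for
   %rbx is 3, a prologue store of %rbx at CFA-16 reaches here as
   reg_save (3, INVALID_REGNUM, -16) and emits a DW_CFA_offset for
   column 3, while a copy of the register into another register passes
   that register as SREG and emits DW_CFA_register instead.  */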
static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_cfa->offset += delta;
    }
}
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last EH insn.  */
      add_cfi_args_size (args_size);
    }
}
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  return DWARF_FRAME_REGNUM (REGNO (reg));
}

/* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */

static bool
compare_reg_or_pc (rtx x, rtx y)
{
  if (REG_P (x) && REG_P (y))
    return REGNO (x) == REGNO (y);
  return x == y;
}
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  unsigned int i;
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  unsigned int i;
  queued_reg_save *q;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      record_reg_saved_in_reg (q->saved_reg, q->reg);

      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  unsigned int iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      if (modified_in_p (q->reg, insn))
	return true;

      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
/* What register, if any, is currently saved in REG?  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */

static void
dwarf2out_frame_debug_cfa_offset (rtx set)
{
  HOST_WIDE_INT offset;
  rtx src, addr, span;
  unsigned int sregno;

  src = XEXP (set, 1);
  addr = XEXP (set, 0);
  gcc_assert (MEM_P (addr));
  addr = XEXP (addr, 0);

  /* As documented, only consider extremely simple addresses.  */
  switch (GET_CODE (addr))
    {
    case REG:
      gcc_assert (dwf_regno (addr) == cur_cfa->reg);
      offset = -cur_cfa->offset;
      break;
    case PLUS:
      gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
      offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
      break;
    default:
      gcc_unreachable ();
    }

  if (src == pc_rtx)
    {
      span = NULL;
      sregno = DWARF_FRAME_RETURN_COLUMN;
    }
  else
    {
      span = targetm.dwarf_register_span (src);
      sregno = dwf_regno (src);
    }

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  if (!span)
    reg_save (sregno, INVALID_REGNUM, offset);
  else
    {
      /* We have a PARALLEL describing where the contents of SRC live.
	 Queue register saves for each piece of the PARALLEL.  */
      int par_index;
      int limit;
      HOST_WIDE_INT span_offset = offset;

      gcc_assert (GET_CODE (span) == PARALLEL);

      limit = XVECLEN (span, 0);
      for (par_index = 0; par_index < limit; par_index++)
	{
	  rtx elem = XVECEXP (span, 0, par_index);
	  sregno = dwf_regno (elem);
	  reg_save (sregno, INVALID_REGNUM, span_offset);
	  span_offset += GET_MODE_SIZE (GET_MODE (elem));
	}
    }
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */

static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno, dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  record_reg_saved_in_reg (dest, src);
  if (src == pc_rtx)
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_regno (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */

static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  unsigned int regno = dwf_regno (reg);

  add_cfi_restore (regno);
  update_row_reg_save (cur_row, regno, NULL);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
/* Record call frame debugging information for an expression EXPR,
   which either sets SP or FP (adjusting how we calculate the frame
   address) or saves a register to the stack or another register.
   LABEL indicates the address of EXPR.

   This function encodes a state machine mapping rtxes to actions on
   cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
   users need not read the source code.

  The High-Level Picture

  Changes in the register we use to calculate the CFA: Currently we
  assume that if you copy the CFA register into another register, we
  should take the other one as the new CFA register; this seems to
  work pretty well.  If it's wrong for some target, it's simple
  enough not to set RTX_FRAME_RELATED_P on the insn in question.

  Changes in the register we use for saving registers to the stack:
  This is usually SP, but not always.  Again, we deduce that if you
  copy SP into another register (and SP is not the CFA register),
  then the new register is the one we will be using for register
  saves.  This also seems to work.

  Register saves: There's not much guesswork about this one; if
  RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
  register save, and the register used to calculate the destination
  had better be the one we think we're using for this purpose.
  It's also assumed that a copy from a call-saved register to another
  register is saving that register if RTX_FRAME_RELATED_P is set on
  that instruction.  If the copy is from a call-saved register to
  the *same* register, that means that the register is now the same
  value as in the caller.

  Except: If the register being saved is the CFA register, and the
  offset is nonzero, we are saving the CFA, so we assume we have to
  use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
  the intent is to save the value of SP from the previous frame.

  In addition, if a register has previously been saved to a different
  register,

  Invariants / Summaries of Rules

  cfa	       current rule for calculating the CFA.  It usually
	       consists of a register and an offset.  This is
	       actually stored in *cur_cfa, but abbreviated
	       for the purposes of this documentation.
  cfa_store    register used by prologue code to save things to the stack
	       cfa_store.offset is the offset from the value of
	       cfa_store.reg to the actual CFA
  cfa_temp     register holding an integral value.  cfa_temp.offset
	       stores the value, which will be used to adjust the
	       stack pointer.  cfa_temp is also used like cfa_store,
	       to track stores to the stack via fp or a temp reg.

  Rules  1- 4: Setting a register's value to cfa.reg or an expression
	       with cfa.reg as the first operand changes the cfa.reg and its
	       cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
	       cfa_temp.offset.

  Rules  6- 9: Set a non-cfa.reg register value to a constant or an
	       expression yielding a constant.  This sets cfa_temp.reg
	       and cfa_temp.offset.

  Rule 5:      Create a new register cfa_store used to save items to the
	       stack.

  Rules 10-14: Save a register to the stack.  Define offset as the
	       difference of the original location and cfa_store's
	       location (or cfa_temp's location if cfa_temp is used).

  Rules 16-20: If AND operation happens on sp in prologue, we assume
	       stack is realigned.  We will use a group of DW_OP_XXX
	       expressions to represent the location of the stored
	       register instead of CFA+offset.

  The Rules

  "{a,b}" indicates a choice of a xor b.
  "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.

  Rule 1:
  (set <reg1> <reg2>:cfa.reg)
  effects: cfa.reg = <reg1>
	   cfa.offset unchanged
	   cfa_temp.reg = <reg1>
	   cfa_temp.offset = cfa.offset

  Rule 2:
  (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
			      {<const_int>,<reg>:cfa_temp.reg}))
  effects: cfa.reg = sp if fp used
	   cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
	   cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
	     if cfa_store.reg==sp

  Rule 3:
  (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
  effects: cfa.reg = fp
	   cfa_offset += +/- <const_int>

  Rule 4:
  (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
  constraints: <reg1> != fp
  effects: cfa.reg = <reg1>
	   cfa_temp.reg = <reg1>
	   cfa_temp.offset = cfa.offset

  Rule 5:
  (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
  constraints: <reg1> != fp
  effects: cfa_store.reg = <reg1>
	   cfa_store.offset = cfa.offset - cfa_temp.offset

  Rule 6:
  (set <reg> <const_int>)
  effects: cfa_temp.reg = <reg>
	   cfa_temp.offset = <const_int>

  Rule 7:
  (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
  effects: cfa_temp.reg = <reg1>
	   cfa_temp.offset |= <const_int>

  Rule 8:
  (set <reg> (high <exp>))
  effects: none

  Rule 9:
  (set <reg> (lo_sum <exp> <const_int>))
  effects: cfa_temp.reg = <reg>
	   cfa_temp.offset = <const_int>

  Rule 10:
  (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
  effects: cfa_store.offset -= <const_int>
	   cfa.offset = cfa_store.offset if cfa.reg == sp
	   cfa.base_offset = -cfa_store.offset

  Rule 11:
  (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
  effects: cfa_store.offset += -/+ mode_size(mem)
	   cfa.offset = cfa_store.offset if cfa.reg == sp
	   cfa.base_offset = -cfa_store.offset

  Rule 12:
  (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
       <reg2>)
  effects: cfa.reg = <reg1>
	   cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset

  Rule 13:
  (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
  effects: cfa.reg = <reg1>
	   cfa.base_offset = -{cfa_store,cfa_temp}.offset

  Rule 14:
  (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
  effects: cfa.reg = <reg1>
	   cfa.base_offset = -cfa_temp.offset
	   cfa_temp.offset -= mode_size(mem)

  Rule 15:
  (set <reg> {unspec, unspec_volatile})
  effects: target-dependent

  Rule 16:
  (set sp (and: sp <const_int>))
  constraints: cfa_store.reg == sp
  effects: cfun->fde.stack_realign = 1
	   cfa_store.offset = 0
	   fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp

  Rule 17:
  (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
  effects: cfa_store.offset += -/+ mode_size(mem)

  Rule 18:
  (set (mem ({pre_inc, pre_dec} sp)) fp)
  constraints: fde->stack_realign == 1
  effects: cfa_store.offset = 0
	   cfa.reg != HARD_FRAME_POINTER_REGNUM

  Rule 19:
  (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
  constraints: fde->stack_realign == 1
	       && cfa.indirect == 0
	       && cfa.reg != HARD_FRAME_POINTER_REGNUM
  effects: Use DW_CFA_def_cfa_expression to define cfa
	   cfa.reg == fde->drap_reg  */
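
/* As a concrete illustration (not from any particular target; register
   names are schematic), a conventional frame-pointer prologue such as

	(set (mem (pre_dec sp)) fp)		;; push fp
	(set fp sp)				;; fp := sp
	(set sp (minus sp (const_int 32)))	;; allocate 32 bytes

   is interpreted as Rule 11 (a register save through cfa_store, here sp),
   then Rule 1 (the CFA register moves from sp to fp), then Rule 2 (a stack
   adjustment, which no longer changes cfa.offset because the CFA is now
   computed from fp, but still adjusts cfa_store.offset).  */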
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde = cfun->fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
1598 if (REG_P (XEXP (src
, 0))
1599 && dwf_regno (XEXP (src
, 0)) == cur_cfa
->reg
1600 && CONST_INT_P (XEXP (src
, 1)))
1602 /* Setting a temporary CFA register that will be copied
1603 into the FP later on. */
1604 offset
= - INTVAL (XEXP (src
, 1));
1605 cur_cfa
->offset
+= offset
;
1606 cur_cfa
->reg
= dwf_regno (dest
);
1607 /* Or used to save regs to the stack. */
1608 cur_trace
->cfa_temp
.reg
= cur_cfa
->reg
;
1609 cur_trace
->cfa_temp
.offset
= cur_cfa
->offset
;
1613 else if (REG_P (XEXP (src
, 0))
1614 && dwf_regno (XEXP (src
, 0)) == cur_trace
->cfa_temp
.reg
1615 && XEXP (src
, 1) == stack_pointer_rtx
)
1617 /* Setting a scratch register that we will use instead
1618 of SP for saving registers to the stack. */
1619 gcc_assert (cur_cfa
->reg
== dw_stack_pointer_regnum
);
1620 cur_trace
->cfa_store
.reg
= dwf_regno (dest
);
1621 cur_trace
->cfa_store
.offset
1622 = cur_cfa
->offset
- cur_trace
->cfa_temp
.offset
;
1626 else if (GET_CODE (src
) == LO_SUM
1627 && CONST_INT_P (XEXP (src
, 1)))
1629 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1630 cur_trace
->cfa_temp
.offset
= INTVAL (XEXP (src
, 1));
1639 cur_trace
->cfa_temp
.reg
= dwf_regno (dest
);
1640 cur_trace
->cfa_temp
.offset
= INTVAL (src
);
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	case HIGH:
	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  break;

	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;
    case MEM:
      /* Saving a register to the stack.  Make sure dest is relative to the
	 current CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     value.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      span = targetm.dwarf_register_span (src);

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Record call frame debugging information for INSN, which either sets
   SP or FP (adjusting how we calculate the frame address) or saves a
   register to the stack.  */

static void
dwarf2out_frame_debug (rtx insn)
{
  rtx note, n;
  bool handled_one = false;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	insn = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_expression (n);
	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_WINDOW_SAVE:
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere.  */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      insn = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (insn);

      /* Check again.  A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (insn))
	dwarf2out_flush_queued_reg_saves ();
    }
}
/* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */

static void
change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
{
  size_t i, n_old, n_new, n_max;
  dw_cfi_ref cfi;

  if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
    add_cfi (new_row->cfa_cfi);
  else
    {
      cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
      if (cfi)
	add_cfi (cfi);
    }

  n_old = vec_safe_length (old_row->reg_save);
  n_new = vec_safe_length (new_row->reg_save);
  n_max = MAX (n_old, n_new);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_old = NULL, r_new = NULL;

      if (i < n_old)
	r_old = (*old_row->reg_save)[i];
      if (i < n_new)
	r_new = (*new_row->reg_save)[i];

      if (r_old == r_new)
	;
      else if (r_new == NULL)
	add_cfi_restore (i);
      else if (!cfi_equal_p (r_old, r_new))
	add_cfi (r_new);
    }
}
/* Examine CFI and return true if a cfi label and set_loc is needed
   beforehand.  Even when generating CFI assembler instructions, we
   still have to add the cfi to the list so that lookup_cfa_1 works
   later on.  When -g2 and above we even need to force emitting of
   CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
   purposes.  If we're generating DWARF3 output we use DW_OP_call_frame_cfa
   and so don't use convert_cfa_to_fb_loc_list.  */

static bool
cfi_label_required_p (dw_cfi_ref cfi)
{
  if (!dwarf2out_do_cfi_asm ())
    return true;

  if (dwarf_version == 2
      && debug_info_level > DINFO_LEVEL_TERSE
      && (write_symbols == DWARF2_DEBUG
	  || write_symbols == VMS_AND_DWARF2_DEBUG))
    {
      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_def_cfa_offset:
	case DW_CFA_def_cfa_offset_sf:
	case DW_CFA_def_cfa_register:
	case DW_CFA_def_cfa:
	case DW_CFA_def_cfa_sf:
	case DW_CFA_def_cfa_expression:
	case DW_CFA_restore_state:
	  return true;
	default:
	  return false;
	}
    }
  return false;
}
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */

static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx start, rtx origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	     cur_trace->id, ti->id,
	     (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	     (origin ? INSN_UID (origin) : 0));

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {
      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  */

static void
maybe_record_trace_start_abnormal (rtx start, rtx origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx insn)
{
  rtx tmp, lab;
  int i, n;

  if (JUMP_P (insn))
    {
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &tmp))
	{
	  rtvec vec;

	  tmp = PATTERN (tmp);
	  vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      lab = XEXP (RTVEC_ELT (vec, i), 0);
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  for (lab = forced_labels; lab; lab = XEXP (lab, 1))
	    maybe_record_trace_start (XEXP (lab, 0), insn);
	}
      else if (returnjump_p (insn))
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      lab = XEXP (ASM_OPERANDS_LABEL (tmp, i), 0);
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  lab = JUMP_LABEL (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (lab = nonlocal_goto_handler_labels; lab; lab = XEXP (lab, 1))
	  maybe_record_trace_start_abnormal (XEXP (lab, 0), insn);
    }
  else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      rtx seq = PATTERN (insn);
      int i, n = XVECLEN (seq, 0);
      for (i = 0; i < n; ++i)
	create_trace_edges (XVECEXP (seq, 0, i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */

static void
scan_insn_after (rtx insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx prev, insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
             trace->id, rtx_name[(int) GET_CODE (insn)],
             INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
        {
          /* Don't bother saving the unneeded queued registers at all.  */
          queued_reg_saves.truncate (0);
          break;
        }
      if (save_point_p (insn))
        {
          /* Propagate across fallthru edges.  */
          dwarf2out_flush_queued_reg_saves ();
          maybe_record_trace_start (insn, NULL);
          break;
        }

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
        continue;

      /* Handle all changes to the row state.  Sequences require special
         handling for the positioning of the notes.  */
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
        {
          rtx elt, pat = PATTERN (insn);
          int i, n = XVECLEN (pat, 0);

          control = XVECEXP (pat, 0, 0);
          if (can_throw_internal (control))
            notice_eh_throw (control);
          dwarf2out_flush_queued_reg_saves ();

          if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
            {
              /* ??? Hopefully multiple delay slots are not annulled.  */
              gcc_assert (n == 2);
              gcc_assert (!RTX_FRAME_RELATED_P (control));
              gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

              elt = XVECEXP (pat, 0, 1);

              if (INSN_FROM_TARGET_P (elt))
                {
                  HOST_WIDE_INT restore_args_size;
                  cfi_vec save_row_reg_save;

                  /* If ELT is an instruction from target of an annulled
                     branch, the effects are for the target only and so
                     the args_size and CFA along the current path
                     shouldn't change.  */
                  add_cfi_insn = NULL;
                  restore_args_size = cur_trace->end_true_args_size;
                  cur_cfa = &cur_row->cfa;
                  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

                  scan_insn_after (elt);

                  /* ??? Should we instead save the entire row state?  */
                  gcc_assert (!queued_reg_saves.length ());

                  create_trace_edges (control);

                  cur_trace->end_true_args_size = restore_args_size;
                  cur_row->cfa = this_cfa;
                  cur_row->reg_save = save_row_reg_save;
                  cur_cfa = &this_cfa;
                }
              else
                {
                  /* If ELT is an annulled branch-taken instruction (i.e.
                     executed only when the branch is not taken), the
                     args_size and CFA should not change through the jump.  */
                  create_trace_edges (control);

                  /* Update and continue with the trace.  */
                  add_cfi_insn = insn;
                  scan_insn_after (elt);
                  def_cfa_1 (&this_cfa);
                }
              continue;
            }

          /* The insns in the delay slot should all be considered to happen
             "before" a call insn.  Consider a call with a stack pointer
             adjustment in the delay slot.  The backtrace from the callee
             should include the sp adjustment.  Unfortunately, that leaves
             us with an unavoidable unwinding error exactly at the call insn
             itself.  For jump insns we'd prefer to avoid this error by
             placing the notes after the sequence.  */
          if (JUMP_P (control))
            add_cfi_insn = insn;

          for (i = 1; i < n; ++i)
            {
              elt = XVECEXP (pat, 0, i);
              scan_insn_after (elt);
            }

          /* Make sure any register saves are visible at the jump target.  */
          dwarf2out_flush_queued_reg_saves ();
          any_cfis_emitted = false;

          /* However, if there is some adjustment on the call itself, e.g.
             a call_pop, that action should be considered to happen after
             the call returns.  */
          add_cfi_insn = insn;
          scan_insn_after (control);
        }
      else
        {
          /* Flush data before calls and jumps, and of course if necessary.  */
          if (can_throw_internal (insn))
            {
              notice_eh_throw (insn);
              dwarf2out_flush_queued_reg_saves ();
            }
          else if (!NONJUMP_INSN_P (insn)
                   || clobbers_queued_reg_save (insn)
                   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
            dwarf2out_flush_queued_reg_saves ();
          any_cfis_emitted = false;

          add_cfi_insn = insn;
          scan_insn_after (insn);
          control = insn;
        }

      /* Between frame-related-p and args_size we might have otherwise
         emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
         once anything is emitted.  */
      if (any_cfis_emitted
          || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
        dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
         same tests as are done to actually create the edges.  So
         always call the routine and let it not create edges for
         non-control-flow insns.  */
      create_trace_edges (control);
    }

  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
/* Scan the function and create the initial set of CFI notes.  */

static void
create_cfi_notes (void)
{
  dw_trace_info *ti;

  gcc_checking_assert (!queued_reg_saves.exists ());
  gcc_checking_assert (!trace_work_list.exists ());

  /* Always begin at the entry trace.  */
  ti = &trace_info[0];
  scan_trace (ti);

  while (!trace_work_list.is_empty ())
    {
      ti = trace_work_list.pop ();
      scan_trace (ti);
    }

  queued_reg_saves.release ();
  trace_work_list.release ();
}
/* Return the insn before the first NOTE_INSN_CFI after START.  */

static rtx
before_next_cfi_note (rtx start)
{
  rtx prev = start;
  while (start)
    {
      if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
        return prev;
      prev = start;
      start = NEXT_INSN (start);
    }
  gcc_unreachable ();
}
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
        {
          trace_info.ordered_remove (i);
          n -= 1;
        }
      else
        gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
         for the portion of the function in the alternate text
         section.  The row state at the very beginning of that
         new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
        old_row = cie_cfi_row;
      else
        {
          old_row = prev_ti->end_row;

          /* If there's no change from the previous end state, fine.  */
          if (cfi_row_equal_p (old_row, ti->beg_row))
            ;
          /* Otherwise check for the common case of sharing state with
             the beginning of an epilogue, but not the end.  Insert
             remember/restore opcodes in that case.  */
          else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
            {
              dw_cfi_ref cfi;

              /* Note that if we blindly insert the remember at the
                 start of the trace, we can wind up increasing the
                 size of the unwind info due to extra advance opcodes.
                 Instead, put the remember immediately before the next
                 state change.  We know there must be one, because the
                 state at the beginning and head of the trace differ.  */
              add_cfi_insn = before_next_cfi_note (prev_ti->head);
              cfi = new_cfi ();
              cfi->dw_cfi_opc = DW_CFA_remember_state;
              add_cfi (cfi);

              add_cfi_insn = ti->head;
              cfi = new_cfi ();
              cfi->dw_cfi_opc = DW_CFA_restore_state;
              add_cfi (cfi);

              old_row = prev_ti->beg_row;
            }
          /* Otherwise, we'll simply change state from the previous end.  */
        }

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
        {
          rtx note;

          fprintf (dump_file, "Fixup between trace %u and %u:\n",
                   prev_ti->id, ti->id);

          note = ti->head;
          do
            {
              note = NEXT_INSN (note);
              gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
              output_cfi_directive (dump_file, NOTE_CFI (note));
            }
          while (note != add_cfi_insn);
        }
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
        {
          ti = &trace_info[i];

          if (ti->switch_sections)
            prev_args_size = 0;
          if (ti->eh_head == NULL)
            continue;
          gcc_assert (!ti->args_size_undefined);

          if (ti->beg_delay_args_size != prev_args_size)
            {
              /* ??? Search back to previous CFI note.  */
              add_cfi_insn = PREV_INSN (ti->eh_head);
              add_cfi_args_size (ti->beg_delay_args_size);
            }

          prev_args_size = ti->end_delay_args_size;
        }
    }
}
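/* Editor's illustration (not GCC code): for the epilogue-sharing case handled
   in connect_traces above, the inserted pair shows up in the assembler
   output roughly as

        .cfi_remember_state     <- just before the first state change
          ...epilogue CFI...
        .cfi_restore_state      <- at the head of the following trace

   which is normally far smaller than re-describing the whole frame with a
   fresh run of DW_CFA_def_cfa / DW_CFA_offset opcodes.  */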
/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti, *tp;
  rtx insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
        saw_barrier = true;
      else if (NOTE_P (insn)
               && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
        {
          /* We should have just seen a barrier.  */
          gcc_assert (saw_barrier);
          switch_sections = true;
        }
      /* Watch out for save_point notes between basic blocks.
         In particular, a note after a barrier.  Do not record these,
         delaying trace creation until the label.  */
      else if (save_point_p (insn)
               && (LABEL_P (insn) || !saw_barrier))
        {
          memset (&ti, 0, sizeof (ti));
          ti.head = insn;
          ti.switch_sections = switch_sections;
          ti.id = trace_info.length ();
          trace_info.safe_push (ti);

          saw_barrier = false;
          switch_sections = false;
        }
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index = htab_create (trace_info.length (),
                             dw_trace_info_hash, dw_trace_info_eq, NULL);
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      void **slot;

      if (dump_file)
        fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", i,
                 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
                 tp->switch_sections ? " (section switch)" : "");

      slot = htab_find_slot_with_hash (trace_index, tp,
                                       INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = (void *) tp;
    }
}
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
        {
        case REG:
          gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
          break;

        case PLUS:
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
          offset = INTVAL (XEXP (rtl, 1));
          break;

        case MINUS:
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
          offset = -INTVAL (XEXP (rtl, 1));
          break;

        default:
          gcc_unreachable ();
        }
      break;

    case PLUS:
      /* The return address is at some offset from any value we can
         actually load.  For instance, on the SPARC it is in %i7+8.  Just
         ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
         register, but choose a different return column.  This will result
         in a DW_CFA_register for the return, and an entry in
         regs_saved_in_regs to match.  If the target later stores that
         return address register to the stack, we want to be able to emit
         the DW_CFA_offset against the return column, not the intermediate
         save register.  Save the contents of regs_saved_in_regs so that
         we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
        {
        case 0:
          break;
        case 1:
          cie_return_save = ggc_alloc_reg_saved_in_data ();
          *cie_return_save = cie_trace.regs_saved_in_regs[0];
          cie_trace.regs_saved_in_regs.release ();
          break;
        default:
          gcc_unreachable ();
        }
    }

  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
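/* Editor's note: a concrete instance of the CIE row built above, assuming a
   typical x86_64 target where the call insn has pushed an 8-byte return
   address (INCOMING_FRAME_SP_OFFSET == 8).  Shown only for illustration;
   the names below are not part of GCC.  */
#if 0
static void
sketch_x86_64_cie_row (void)
{
  dw_cfa_location entry_cfa;

  memset (&entry_cfa, 0, sizeof (entry_cfa));
  entry_cfa.reg = 7;            /* DWARF column 7 is %rsp on x86_64.  */
  entry_cfa.offset = 8;         /* CFA = %rsp + 8 at the entry point.  */
  /* The return address itself lives in the word at CFA - 8, which is what
     initial_return_save ends up recording for the return column.  */
}
#endif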
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  htab_delete (trace_index);
  trace_index = NULL;

  return 0;
}
/* Convert a DWARF call frame info operation to its string name.  */

const char *
dwarf_cfi_name (unsigned int cfi_opc)
{
  const char *name = get_DW_CFA_name (cfi_opc);

  if (name != NULL)
    return name;

  return "DW_CFA_<unknown>";
}
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
/* Similar, but used for .cfi_escape.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
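/* Editor's note: an example of the byte block the two routines above emit,
   assuming the standard DWARF opcode values.  A CFA expression meaning
   "*(%rbp - 8)" on x86_64 would be written out as the ULEB128 length 3
   followed by the expression bytes shown below.  Not GCC code.  */
#if 0
static const unsigned char example_cfa_expr_block[] = {
  0x03,         /* ULEB128 length of the expression block */
  0x76,         /* DW_OP_breg6: %rbp-relative address ...  */
  0x78,         /* ... with SLEB128 offset -8 */
  0x06          /* DW_OP_deref: the CFA is the value stored there */
};
#endif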
/* Output a Call Frame Information opcode and its operand(s).  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
                             | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
                         "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
                         ((unsigned HOST_WIDE_INT)
                          cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
                           "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
                           "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
                           "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
        {
        case DW_CFA_set_loc:
          if (for_eh)
            dw2_asm_output_encoded_addr_rtx (
                ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
                gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
                false, NULL);
          else
            dw2_asm_output_addr (DWARF2_ADDR_SIZE,
                                 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc1:
          dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc2:
          dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc4:
          dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_MIPS_advance_loc8:
          dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_offset_extended:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_uleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
          break;

        case DW_CFA_offset_extended_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_restore_extended:
        case DW_CFA_undefined:
        case DW_CFA_same_value:
        case DW_CFA_def_cfa_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_def_cfa_offset:
        case DW_CFA_GNU_args_size:
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
          break;

        case DW_CFA_def_cfa_offset_sf:
          off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_GNU_window_save:
          break;

        case DW_CFA_def_cfa_expression:
        case DW_CFA_expression:
          output_cfa_loc (cfi, for_eh);
          break;

        case DW_CFA_GNU_negative_offset_extended:
          /* Obsoleted by DW_CFA_offset_extended_sf.  */
          gcc_unreachable ();

        default:
          break;
        }
    }
}
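/* Editor's note: a minimal sketch of the one-byte opcode packing used at the
   top of output_cfi, assuming the standard DWARF values
   DW_CFA_advance_loc == 0x40 and DW_CFA_offset == 0x80.  The helper names
   are illustrative only and not part of GCC.  */
#if 0
static unsigned char
sketch_pack_advance_loc (unsigned int delta)
{
  /* The low 6 bits of the opcode byte carry the code-aligned delta, so
     e.g. an advance of 0x10 units becomes the single byte 0x50.  */
  return 0x40 | (delta & 0x3f);
}

static unsigned char
sketch_pack_offset (unsigned int column)
{
  /* Likewise DW_CFA_offset folds the register column into its low 6 bits;
     the data-aligned offset follows as a separate ULEB128 operand, which
     is what div_data_align computes above.  */
  return 0x80 | (column & 0x3f);
}
#endif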
/* Similar, but do it via assembler directives instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
         via directives.  The assembler is going to take care of this for
         us.  But this routine is also used for debugging dumps, so
         print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
               HOST_WIDE_INT_PRINT_DEC"\n",
               cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;

    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      if (f == asm_out_file)
        {
          fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
          dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          if (flag_debug_asm)
            fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
                     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
          fprintf (f, "\n");
        }
      else
        fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC"\n",
                 cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_def_cfa_expression ...\n");
          break;
        }
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_cfa_expression ...\n");
          break;
        }
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
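/* Editor's note (illustrative only): since the DW_CFA_GNU_args_size case
   above is emitted to the assembler through .cfi_escape, 16 bytes of
   outgoing arguments come out as a line equivalent to

        .cfi_escape 0x2e,0x10

   assuming the standard opcode value DW_CFA_GNU_args_size == 0x2e and that
   the size fits in a single ULEB128 byte.  */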
void
dwarf2out_emit_cfi (dw_cfi_ref cfi)
{
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
}
static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      dw_cfa_location dummy;

      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}
void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;

/* Decide whether we want to emit frame unwind information for the current
   translation unit.  */

bool
dwarf2out_do_frame (void)
{
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return true;

  if (saved_do_cfi_asm > 0)
    return true;

  if (targetm.debug_unwind_info () == UI_DWARF2)
    return true;

  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    return true;

  return false;
}
/* Decide whether to emit frame unwind via assembler directives.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  saved_do_cfi_asm = 1;
  return true;
}
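/* Editor's note: a minimal sketch of the encoding test above, assuming the
   standard DW_EH_PE_* values from dwarf2.h (DW_EH_PE_pcrel == 0x10,
   DW_EH_PE_aligned == 0x50).  Not GCC code; the helper name is invented.  */
#if 0
static bool
sketch_cfi_asm_encoding_ok (int enc)
{
  /* The 0x70 mask selects the "application" nibble of the EH pointer
     encoding.  Only absolute (0) and pc-relative values can be expressed
     through the CFI personality/LSDA directives; aligned encodings in
     particular cannot, which is what the comment above alludes to.  */
  return (enc & 0x70) == 0 || (enc & 0x70) == DW_EH_PE_pcrel;
}
#endif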
static bool
gate_dwarf2_frame (void)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}
struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_dwarf2_frame,			/* gate */
  execute_dwarf2_frame,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_FINAL,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
#include "gt-dwarf2cfi.h"