2015-06-11 Paul Thomas <pault@gcc.gnu.org>
[official-gcc.git] / gcc / dwarf2cfi.c
blob1c91d975e6ef87090474a172ffe01232497b9f7f
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "input.h"
28 #include "alias.h"
29 #include "symtab.h"
30 #include "tree.h"
31 #include "stor-layout.h"
32 #include "hard-reg-set.h"
33 #include "function.h"
34 #include "cfgbuild.h"
35 #include "dwarf2.h"
36 #include "dwarf2out.h"
37 #include "dwarf2asm.h"
38 #include "tm_p.h"
39 #include "target.h"
40 #include "common/common-target.h"
41 #include "tree-pass.h"
43 #include "except.h" /* expand_builtin_dwarf_sp_column */
44 #include "insn-config.h"
45 #include "expmed.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "emit-rtl.h"
50 #include "varasm.h"
51 #include "stmt.h"
52 #include "expr.h" /* init_return_column_size */
53 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
54 #include "output.h" /* asm_out_file */
55 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
58 /* ??? Poison these here until it can be done generically. They've been
59 totally replaced in this file; make sure it stays that way. */
60 #undef DWARF2_UNWIND_INFO
61 #undef DWARF2_FRAME_INFO
62 #if (GCC_VERSION >= 3000)
63 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
64 #endif
66 #ifndef INCOMING_RETURN_ADDR_RTX
67 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
68 #endif
70 /* Maximum size (in bytes) of an artificially generated label. */
71 #define MAX_ARTIFICIAL_LABEL_BYTES 30
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column number; a null entry means "not saved".  */
  cfi_vec reg_save;
} dw_cfi_row;
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct {
  /* The register as the caller knows it.  */
  rtx orig_reg;
  /* The register currently holding ORIG_REG's entry value.  */
  rtx saved_in_reg;
} reg_saved_in_data;
93 /* Since we no longer have a proper CFG, we're going to create a facsimile
94 of one on the fly while processing the frame-related insns.
96 We create dw_trace_info structures for each extended basic block beginning
97 and ending at a "save point". Save points are labels, barriers, certain
98 notes, and of course the beginning and end of the function.
100 As we encounter control transfer insns, we propagate the "current"
101 row state across the edges to the starts of traces. When checking is
102 enabled, we validate that we propagate the same data from all sources.
104 All traces are members of the TRACE_INFO array, in the order in which
105 they appear in the instruction stream.
107 All save points are present in the TRACE_INDEX hash, mapping the insn
108 starting a trace to the dw_trace_info describing the trace. */
typedef struct
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;
166 typedef dw_trace_info *dw_trace_info_ref;
/* Hashtable helpers.  */

struct trace_info_hasher : typed_noop_remove <dw_trace_info>
{
  typedef dw_trace_info *value_type;
  typedef dw_trace_info *compare_type;
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

/* Hash a trace by the UID of its head insn; the head insn uniquely
   identifies the trace.  */

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

/* Two traces are equal iff they begin at the same insn.  */

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in the order they appear in the instruction stream.  */
static vec<dw_trace_info> trace_info;
/* Traces waiting to be processed.  */
static vec<dw_trace_info_ref> trace_work_list;
/* Maps a trace's head insn to its dw_trace_info.  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* NOTE(review): presumably the return-address save recorded by the CIE,
   so FDEs need not repeat it — confirm against uses later in the file.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding dwarf2out_cfi_label's generated "LCFI" labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  /* The register being saved.  */
  rtx reg;
  /* If non-null, the register REG is saved in; otherwise REG is saved
     at CFA_OFFSET from the CFA.  */
  rtx saved_reg;
  /* Offset from the CFA at which REG is saved.  */
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;

static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
/* Hook used by __throw.  Return, as a constant rtx, the DWARF column
   number of the stack pointer in the output encoding.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
253 /* MEM is a memory reference for the register size table, each element of
254 which has mode MODE. Initialize column C as a return address column. */
256 static void
257 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
259 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
260 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
261 emit_move_insn (adjust_address (mem, mode, offset),
262 gen_int_mode (size, mode));
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

typedef struct
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

} init_one_dwarf_reg_state;
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed even if we write no slot for it, so the caller
     does not process it again as part of a register span.  */
  init_state->processed_regno[regno] = true;

  /* Registers mapped outside the size table get no slot.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* NOTE(review): presumably a negative slot offset marks a column with
     no slot in the table — confirm DWARF_REG_TO_UNWIND_COLUMN semantics.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */
      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  /* The target describes this register as several pieces; emit a
	     size entry for each piece of the span.  */
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Make sure the return-address column always has a size.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
372 static dw_trace_info *
373 get_trace_info (rtx_insn *insn)
375 dw_trace_info dummy;
376 dummy.head = insn;
377 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
/* Return true if INSN is a point at which a trace may begin or end.  */

static bool
save_point_p (rtx_insn *insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
	return true;
      }

  return false;
}
402 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
404 static inline HOST_WIDE_INT
405 div_data_align (HOST_WIDE_INT off)
407 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
408 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
409 return r;
412 /* Return true if we need a signed version of a given opcode
413 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
415 static inline bool
416 need_data_align_sf_opcode (HOST_WIDE_INT off)
418 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  /* Start with both operands cleared.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  /* INVALID_REGNUM marks the CFA as not yet defined.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  *dst = *src;
  /* Deep-copy the register-save vector so SRC and DST can diverge.  */
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
459 /* Generate a new label for the CFI info to refer to. */
461 static char *
462 dwarf2out_cfi_label (void)
464 int num = dwarf2out_cfi_label_num++;
465 char label[20];
467 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
469 return xstrdup (label);
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Attach the CFI to a new NOTE_INSN_CFI placed after ADD_CFI_INSN,
	 and advance the insertion point past it.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE as the current
   outgoing-argument area size.  */

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
504 static void
505 add_cfi_restore (unsigned reg)
507 dw_cfi_ref cfi = new_cfi ();
509 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
510 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
512 add_cfi (cfi);
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand; new slots are cleared (no save).  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  Only the simple register/offset forms are handled;
   NOTE(review): presumably the inverse of build_cfa_loc — any other opcode
   aborts.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
  struct dw_loc_descr_node *ptr;
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	/* DW_OP_reg<n> encodes the register number in the opcode itself.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  cfa->reg = op - DW_OP_reg0;
	  break;
	case DW_OP_regx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	/* DW_OP_breg<n>: base register plus a signed offset operand.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	case DW_OP_deref:
	  cfa->indirect = 1;
	  break;
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    /* Only one level of remembered state is supported; nesting asserts.  */
    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other opcodes do not affect the CFA.  */
      break;
    }
}
673 /* Determine if two dw_cfa_location structures define the same data. */
675 bool
676 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
678 return (loc1->reg == loc2->reg
679 && loc1->offset == loc2->offset
680 && loc1->indirect == loc2->indirect
681 && (loc1->indirect == 0
682 || loc1->base_offset == loc2->base_offset));
/* Determine if two CFI operands are identical.  T is the operand type
   shared by A and B.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer comparison first avoids strcmp on the same string.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
/* Determine if two CFI entries are identical.  Either may be NULL.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* If A's CFA is given by expression, compare expressions; otherwise
     compare the simple reg+offset forms.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* Compare register saves column by column; a column missing from one
     vector is equivalent to a null (no save) entry.  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's cfa_store offset in sync when the store register
     is the same as the (direct) CFA register.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember an expression CFI so row comparisons (cfi_row_equal_p)
	 can compare expression-defined CFAs.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at an offset from the CFA: pick the most compact opcode
	 that can represent the column and factored offset.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  /* No change, nothing to record.  */
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn in this trace: record it and initialize the delayed
	 args_size bounds.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
969 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
970 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
971 used in places where rtl is prohibited. */
973 static inline unsigned
974 dwf_regno (const_rtx reg)
976 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
977 return DWARF_FRAME_REGNUM (REGNO (reg));
980 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
982 static bool
983 compare_reg_or_pc (rtx x, rtx y)
985 if (REG_P (x) && REG_P (y))
986 return REGNO (x) == REGNO (y);
987 return x == y;
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or remove it in place.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Null DEST with no existing entry is a no-op delete.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
/* Output all the entries in QUEUED_REG_SAVES, then empty the queue.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      /* Keep the inverse map in regs_saved_in_regs current; a null
	 saved_reg removes any stale entry for q->reg.  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      /* The queued register itself is modified by INSN.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* A register recorded as holding the queued register's old value
	 is modified by INSN.  */
      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
/* What register, if any, is currently saved in REG?  Returns NULL_RTX
   if REG holds no saved register.  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  /* Check pending (queued) saves first, then already-recorded ones.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.
   PAT is a REG, (PLUS reg const_int), or a MEM wrapping one of those,
   giving the new CFA directly.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel an outer PLUS into the CFA offset.  */
  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  /* A MEM makes the CFA indirect; its address may again be reg+const.  */
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.
   PAT is a SET; its destination becomes the new CFA register, and a PLUS
   source additionally shrinks the CFA offset.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA_REG + N: the CFA itself is unchanged, but its offset
	 from the new CFA register DEST is N less than from CFA_REG.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
1172 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1174 static void
1175 dwarf2out_frame_debug_cfa_offset (rtx set)
1177 HOST_WIDE_INT offset;
1178 rtx src, addr, span;
1179 unsigned int sregno;
1181 src = XEXP (set, 1);
1182 addr = XEXP (set, 0);
1183 gcc_assert (MEM_P (addr));
1184 addr = XEXP (addr, 0);
1186 /* As documented, only consider extremely simple addresses. */
1187 switch (GET_CODE (addr))
1189 case REG:
1190 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1191 offset = -cur_cfa->offset;
1192 break;
1193 case PLUS:
1194 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1195 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1196 break;
1197 default:
1198 gcc_unreachable ();
1201 if (src == pc_rtx)
1203 span = NULL;
1204 sregno = DWARF_FRAME_RETURN_COLUMN;
1206 else
1208 span = targetm.dwarf_register_span (src);
1209 sregno = dwf_regno (src);
1212 /* ??? We'd like to use queue_reg_save, but we need to come up with
1213 a different flushing heuristic for epilogues. */
1214 if (!span)
1215 reg_save (sregno, INVALID_REGNUM, offset);
1216 else
1218 /* We have a PARALLEL describing where the contents of SRC live.
1219 Adjust the offset for each piece of the PARALLEL. */
1220 HOST_WIDE_INT span_offset = offset;
1222 gcc_assert (GET_CODE (span) == PARALLEL);
1224 const int par_len = XVECLEN (span, 0);
1225 for (int par_index = 0; par_index < par_len; par_index++)
1227 rtx elem = XVECEXP (span, 0, par_index);
1228 sregno = dwf_regno (src);
1229 reg_save (sregno, INVALID_REGNUM, span_offset);
1230 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1235 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1237 static void
1238 dwarf2out_frame_debug_cfa_register (rtx set)
1240 rtx src, dest;
1241 unsigned sregno, dregno;
1243 src = XEXP (set, 1);
1244 dest = XEXP (set, 0);
1246 record_reg_saved_in_reg (dest, src);
1247 if (src == pc_rtx)
1248 sregno = DWARF_FRAME_RETURN_COLUMN;
1249 else
1250 sregno = dwf_regno (src);
1252 dregno = dwf_regno (dest);
1254 /* ??? We'd like to use queue_reg_save, but we need to come up with
1255 a different flushing heuristic for epilogues. */
1256 reg_save (sregno, dregno, 0);
1259 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1261 static void
1262 dwarf2out_frame_debug_cfa_expression (rtx set)
1264 rtx src, dest, span;
1265 dw_cfi_ref cfi = new_cfi ();
1266 unsigned regno;
1268 dest = SET_DEST (set);
1269 src = SET_SRC (set);
1271 gcc_assert (REG_P (src));
1272 gcc_assert (MEM_P (dest));
1274 span = targetm.dwarf_register_span (src);
1275 gcc_assert (!span);
1277 regno = dwf_regno (src);
1279 cfi->dw_cfi_opc = DW_CFA_expression;
1280 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1281 cfi->dw_cfi_oprnd2.dw_cfi_loc
1282 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1283 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1285 /* ??? We'd like to use queue_reg_save, were the interface different,
1286 and, as above, we could manage flushing for epilogues. */
1287 add_cfi (cfi);
1288 update_row_reg_save (cur_row, regno, cfi);
1291 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1293 static void
1294 dwarf2out_frame_debug_cfa_restore (rtx reg)
1296 gcc_assert (REG_P (reg));
1298 rtx span = targetm.dwarf_register_span (reg);
1299 if (!span)
1301 unsigned int regno = dwf_regno (reg);
1302 add_cfi_restore (regno);
1303 update_row_reg_save (cur_row, regno, NULL);
1305 else
1307 /* We have a PARALLEL describing where the contents of REG live.
1308 Restore the register for each piece of the PARALLEL. */
1309 gcc_assert (GET_CODE (span) == PARALLEL);
1311 const int par_len = XVECLEN (span, 0);
1312 for (int par_index = 0; par_index < par_len; par_index++)
1314 reg = XVECEXP (span, 0, par_index);
1315 gcc_assert (REG_P (reg));
1316 unsigned int regno = dwf_regno (reg);
1317 add_cfi_restore (regno);
1318 update_row_reg_save (cur_row, regno, NULL);
1323 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1324 ??? Perhaps we should note in the CIE where windows are saved (instead of
1325 assuming 0(cfa)) and what registers are in the window. */
1327 static void
1328 dwarf2out_frame_debug_cfa_window_save (void)
1330 dw_cfi_ref cfi = new_cfi ();
1332 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1333 add_cfi (cfi);
1336 /* Record call frame debugging information for an expression EXPR,
1337 which either sets SP or FP (adjusting how we calculate the frame
1338 address) or saves a register to the stack or another register.
1339 LABEL indicates the address of EXPR.
1341 This function encodes a state machine mapping rtxes to actions on
1342 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1343 users need not read the source code.
1345 The High-Level Picture
1347 Changes in the register we use to calculate the CFA: Currently we
1348 assume that if you copy the CFA register into another register, we
1349 should take the other one as the new CFA register; this seems to
1350 work pretty well. If it's wrong for some target, it's simple
1351 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1353 Changes in the register we use for saving registers to the stack:
1354 This is usually SP, but not always. Again, we deduce that if you
1355 copy SP into another register (and SP is not the CFA register),
1356 then the new register is the one we will be using for register
1357 saves. This also seems to work.
1359 Register saves: There's not much guesswork about this one; if
1360 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1361 register save, and the register used to calculate the destination
1362 had better be the one we think we're using for this purpose.
1363 It's also assumed that a copy from a call-saved register to another
1364 register is saving that register if RTX_FRAME_RELATED_P is set on
1365 that instruction. If the copy is from a call-saved register to
1366 the *same* register, that means that the register is now the same
1367 value as in the caller.
1369 Except: If the register being saved is the CFA register, and the
1370 offset is nonzero, we are saving the CFA, so we assume we have to
1371 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1372 the intent is to save the value of SP from the previous frame.
1374 In addition, if a register has previously been saved to a different
1375 register, a subsequent store of that saving register is recorded as
 a save of the original register (see reg_saved_in).
1377 Invariants / Summaries of Rules
1379 cfa current rule for calculating the CFA. It usually
1380 consists of a register and an offset. This is
1381 actually stored in *cur_cfa, but abbreviated
1382 for the purposes of this documentation.
1383 cfa_store register used by prologue code to save things to the stack
1384 cfa_store.offset is the offset from the value of
1385 cfa_store.reg to the actual CFA
1386 cfa_temp register holding an integral value. cfa_temp.offset
1387 stores the value, which will be used to adjust the
1388 stack pointer. cfa_temp is also used like cfa_store,
1389 to track stores to the stack via fp or a temp reg.
1391 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1392 with cfa.reg as the first operand changes the cfa.reg and its
1393 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1394 cfa_temp.offset.
1396 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1397 expression yielding a constant. This sets cfa_temp.reg
1398 and cfa_temp.offset.
1400 Rule 5: Create a new register cfa_store used to save items to the
1401 stack.
1403 Rules 10-14: Save a register to the stack. Define offset as the
1404 difference of the original location and cfa_store's
1405 location (or cfa_temp's location if cfa_temp is used).
1407 Rules 16-20: If AND operation happens on sp in prologue, we assume
1408 stack is realigned. We will use a group of DW_OP_XXX
1409 expressions to represent the location of the stored
1410 register instead of CFA+offset.
1412 The Rules
1414 "{a,b}" indicates a choice of a xor b.
1415 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1417 Rule 1:
1418 (set <reg1> <reg2>:cfa.reg)
1419 effects: cfa.reg = <reg1>
1420 cfa.offset unchanged
1421 cfa_temp.reg = <reg1>
1422 cfa_temp.offset = cfa.offset
1424 Rule 2:
1425 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1426 {<const_int>,<reg>:cfa_temp.reg}))
1427 effects: cfa.reg = sp if fp used
1428 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1429 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1430 if cfa_store.reg==sp
1432 Rule 3:
1433 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1434 effects: cfa.reg = fp
1435 cfa_offset += +/- <const_int>
1437 Rule 4:
1438 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1439 constraints: <reg1> != fp
1440 <reg1> != sp
1441 effects: cfa.reg = <reg1>
1442 cfa_temp.reg = <reg1>
1443 cfa_temp.offset = cfa.offset
1445 Rule 5:
1446 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1447 constraints: <reg1> != fp
1448 <reg1> != sp
1449 effects: cfa_store.reg = <reg1>
1450 cfa_store.offset = cfa.offset - cfa_temp.offset
1452 Rule 6:
1453 (set <reg> <const_int>)
1454 effects: cfa_temp.reg = <reg>
1455 cfa_temp.offset = <const_int>
1457 Rule 7:
1458 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1459 effects: cfa_temp.reg = <reg1>
1460 cfa_temp.offset |= <const_int>
1462 Rule 8:
1463 (set <reg> (high <exp>))
1464 effects: none
1466 Rule 9:
1467 (set <reg> (lo_sum <exp> <const_int>))
1468 effects: cfa_temp.reg = <reg>
1469 cfa_temp.offset = <const_int>
1471 Rule 10:
1472 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1473 effects: cfa_store.offset -= <const_int>
1474 cfa.offset = cfa_store.offset if cfa.reg == sp
1475 cfa.reg = sp
1476 cfa.base_offset = -cfa_store.offset
1478 Rule 11:
1479 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1480 effects: cfa_store.offset += -/+ mode_size(mem)
1481 cfa.offset = cfa_store.offset if cfa.reg == sp
1482 cfa.reg = sp
1483 cfa.base_offset = -cfa_store.offset
1485 Rule 12:
1486 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1488 <reg2>)
1489 effects: cfa.reg = <reg1>
1490 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1492 Rule 13:
1493 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1494 effects: cfa.reg = <reg1>
1495 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1497 Rule 14:
1498 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1499 effects: cfa.reg = <reg1>
1500 cfa.base_offset = -cfa_temp.offset
1501 cfa_temp.offset -= mode_size(mem)
1503 Rule 15:
1504 (set <reg> {unspec, unspec_volatile})
1505 effects: target-dependent
1507 Rule 16:
1508 (set sp (and: sp <const_int>))
1509 constraints: cfa_store.reg == sp
1510 effects: cfun->fde.stack_realign = 1
1511 cfa_store.offset = 0
1512 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1514 Rule 17:
1515 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1516 effects: cfa_store.offset += -/+ mode_size(mem)
1518 Rule 18:
1519 (set (mem ({pre_inc, pre_dec} sp)) fp)
1520 constraints: fde->stack_realign == 1
1521 effects: cfa_store.offset = 0
1522 cfa.reg != HARD_FRAME_POINTER_REGNUM
1524 Rule 19:
1525 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1526 constraints: fde->stack_realign == 1
1527 && cfa.offset == 0
1528 && cfa.indirect == 0
1529 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1530 effects: Use DW_CFA_def_cfa_expression to define cfa
1531 cfa.reg == fde->drap_reg */
/* Process EXPR, an RTX_FRAME_RELATED_P SET (or PARALLEL/SEQUENCE of
   SETs), according to the rules documented above: update *cur_cfa,
   cur_trace->cfa_store and cur_trace->cfa_temp, and queue register
   saves as needed.  */
1533 static void
1534 dwarf2out_frame_debug_expr (rtx expr)
1536 rtx src, dest, span;
1537 HOST_WIDE_INT offset;
1538 dw_fde_ref fde;
1540 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1541 the PARALLEL independently. The first element is always processed if
1542 it is a SET. This is for backward compatibility. Other elements
1543 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1544 flag is set in them. */
1545 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1547 int par_index;
1548 int limit = XVECLEN (expr, 0);
1549 rtx elem;
1551 /* PARALLELs have strict read-modify-write semantics, so we
1552 ought to evaluate every rvalue before changing any lvalue.
1553 It's cumbersome to do that in general, but there's an
1554 easy approximation that is enough for all current users:
1555 handle register saves before register assignments. */
1556 if (GET_CODE (expr) == PARALLEL)
1557 for (par_index = 0; par_index < limit; par_index++)
1559 elem = XVECEXP (expr, 0, par_index);
1560 if (GET_CODE (elem) == SET
1561 && MEM_P (SET_DEST (elem))
1562 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1563 dwarf2out_frame_debug_expr (elem);
/* Second pass: handle the non-MEM destinations (register assignments);
   for a SEQUENCE everything is handled here in order.  */
1566 for (par_index = 0; par_index < limit; par_index++)
1568 elem = XVECEXP (expr, 0, par_index);
1569 if (GET_CODE (elem) == SET
1570 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1571 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1572 dwarf2out_frame_debug_expr (elem);
1574 return;
1577 gcc_assert (GET_CODE (expr) == SET);
1579 src = SET_SRC (expr);
1580 dest = SET_DEST (expr);
/* If SRC's value currently lives in some other register (see
   reg_saved_in), treat this operation as acting on the original
   register.  */
1582 if (REG_P (src))
1584 rtx rsi = reg_saved_in (src);
1585 if (rsi)
1586 src = rsi;
/* FDE may be null; every use below is guarded.  */
1589 fde = cfun->fde;
1591 switch (GET_CODE (dest))
1593 case REG:
1594 switch (GET_CODE (src))
1596 /* Setting FP from SP. */
1597 case REG:
1598 if (cur_cfa->reg == dwf_regno (src))
1600 /* Rule 1 */
1601 /* Update the CFA rule wrt SP or FP. Make sure src is
1602 relative to the current CFA register.
1604 We used to require that dest be either SP or FP, but the
1605 ARM copies SP to a temporary register, and from there to
1606 FP. So we just rely on the backends to only set
1607 RTX_FRAME_RELATED_P on appropriate insns. */
1608 cur_cfa->reg = dwf_regno (dest);
1609 cur_trace->cfa_temp.reg = cur_cfa->reg;
1610 cur_trace->cfa_temp.offset = cur_cfa->offset;
1612 else
1614 /* Saving a register in a register. */
1615 gcc_assert (!fixed_regs [REGNO (dest)]
1616 /* For the SPARC and its register window. */
1617 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1619 /* After stack is aligned, we can only save SP in FP
1620 if drap register is used. In this case, we have
1621 to restore stack pointer with the CFA value and we
1622 don't generate this DWARF information. */
1623 if (fde
1624 && fde->stack_realign
1625 && REGNO (src) == STACK_POINTER_REGNUM)
1626 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1627 && fde->drap_reg != INVALID_REGNUM
1628 && cur_cfa->reg != dwf_regno (src));
1629 else
1630 queue_reg_save (src, dest, 0);
1632 break;
1634 case PLUS:
1635 case MINUS:
1636 case LO_SUM:
1637 if (dest == stack_pointer_rtx)
1639 /* Rule 2 */
1640 /* Adjusting SP. */
1641 switch (GET_CODE (XEXP (src, 1)))
1643 case CONST_INT:
1644 offset = INTVAL (XEXP (src, 1));
1645 break;
1646 case REG:
1647 gcc_assert (dwf_regno (XEXP (src, 1))
1648 == cur_trace->cfa_temp.reg);
1649 offset = cur_trace->cfa_temp.offset;
1650 break;
1651 default:
1652 gcc_unreachable ();
1655 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1657 /* Restoring SP from FP in the epilogue. */
1658 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1659 cur_cfa->reg = dw_stack_pointer_regnum;
1661 else if (GET_CODE (src) == LO_SUM)
1662 /* Assume we've set the source reg of the LO_SUM from sp. */
1664 else
1665 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1667 if (GET_CODE (src) != MINUS)
1668 offset = -offset;
1669 if (cur_cfa->reg == dw_stack_pointer_regnum)
1670 cur_cfa->offset += offset;
1671 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1672 cur_trace->cfa_store.offset += offset;
1674 else if (dest == hard_frame_pointer_rtx)
1676 /* Rule 3 */
1677 /* Either setting the FP from an offset of the SP,
1678 or adjusting the FP */
1679 gcc_assert (frame_pointer_needed)
1681 gcc_assert (REG_P (XEXP (src, 0))
1682 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1683 && CONST_INT_P (XEXP (src, 1)));
1684 offset = INTVAL (XEXP (src, 1));
1685 if (GET_CODE (src) != MINUS)
1686 offset = -offset;
1687 cur_cfa->offset += offset;
1688 cur_cfa->reg = dw_frame_pointer_regnum;
1690 else
1692 gcc_assert (GET_CODE (src) != MINUS);
1694 /* Rule 4 */
1695 if (REG_P (XEXP (src, 0))
1696 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1697 && CONST_INT_P (XEXP (src, 1)))
1699 /* Setting a temporary CFA register that will be copied
1700 into the FP later on. */
1701 offset = - INTVAL (XEXP (src, 1));
1702 cur_cfa->offset += offset;
1703 cur_cfa->reg = dwf_regno (dest);
1704 /* Or used to save regs to the stack. */
1705 cur_trace->cfa_temp.reg = cur_cfa->reg;
1706 cur_trace->cfa_temp.offset = cur_cfa->offset;
1709 /* Rule 5 */
1710 else if (REG_P (XEXP (src, 0))
1711 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1712 && XEXP (src, 1) == stack_pointer_rtx)
1714 /* Setting a scratch register that we will use instead
1715 of SP for saving registers to the stack. */
1716 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1717 cur_trace->cfa_store.reg = dwf_regno (dest);
1718 cur_trace->cfa_store.offset
1719 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1722 /* Rule 9 */
1723 else if (GET_CODE (src) == LO_SUM
1724 && CONST_INT_P (XEXP (src, 1)))
1726 cur_trace->cfa_temp.reg = dwf_regno (dest);
1727 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1729 else
1730 gcc_unreachable ();
1732 break;
1734 /* Rule 6 */
1735 case CONST_INT:
1736 cur_trace->cfa_temp.reg = dwf_regno (dest);
1737 cur_trace->cfa_temp.offset = INTVAL (src);
1738 break;
1740 /* Rule 7 */
1741 case IOR:
1742 gcc_assert (REG_P (XEXP (src, 0))
1743 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1744 && CONST_INT_P (XEXP (src, 1)));
1746 cur_trace->cfa_temp.reg = dwf_regno (dest);
1747 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1748 break;
1750 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1751 which will fill in all of the bits. */
1752 /* Rule 8 */
1753 case HIGH:
1754 break;
1756 /* Rule 15 */
1757 case UNSPEC:
1758 case UNSPEC_VOLATILE:
1759 /* All unspecs should be represented by REG_CFA_* notes. */
1760 gcc_unreachable ();
1761 return;
1763 /* Rule 16 */
1764 case AND:
1765 /* If this AND operation happens on stack pointer in prologue,
1766 we assume the stack is realigned and we extract the
1767 alignment. */
1768 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1770 /* We interpret reg_save differently with stack_realign set.
1771 Thus we must flush whatever we have queued first. */
1772 dwarf2out_flush_queued_reg_saves ();
1774 gcc_assert (cur_trace->cfa_store.reg
1775 == dwf_regno (XEXP (src, 0)));
1776 fde->stack_realign = 1;
1777 fde->stack_realignment = INTVAL (XEXP (src, 1));
1778 cur_trace->cfa_store.offset = 0;
1780 if (cur_cfa->reg != dw_stack_pointer_regnum
1781 && cur_cfa->reg != dw_frame_pointer_regnum)
1782 fde->drap_reg = cur_cfa->reg;
1784 return;
1786 default:
1787 gcc_unreachable ();
1789 break;
1791 case MEM:
1793 /* Saving a register to the stack. Make sure dest is relative to the
1794 CFA register. */
1795 switch (GET_CODE (XEXP (dest, 0)))
1797 /* Rule 10 */
1798 /* With a push. */
1799 case PRE_MODIFY:
1800 case POST_MODIFY:
1801 /* We can't handle variable size modifications. */
1802 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1803 == CONST_INT);
1804 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1806 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1807 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1809 cur_trace->cfa_store.offset += offset;
1810 if (cur_cfa->reg == dw_stack_pointer_regnum)
1811 cur_cfa->offset = cur_trace->cfa_store.offset;
1813 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1814 offset -= cur_trace->cfa_store.offset;
1815 else
1816 offset = -cur_trace->cfa_store.offset;
1817 break;
1819 /* Rule 11 */
1820 case PRE_INC:
1821 case PRE_DEC:
1822 case POST_DEC:
1823 offset = GET_MODE_SIZE (GET_MODE (dest));
1824 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1825 offset = -offset;
1827 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1828 == STACK_POINTER_REGNUM)
1829 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1831 cur_trace->cfa_store.offset += offset;
1833 /* Rule 18: If stack is aligned, we will use FP as a
1834 reference to represent the address of the stored
1835 regiser. */
1836 if (fde
1837 && fde->stack_realign
1838 && REG_P (src)
1839 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1841 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1842 cur_trace->cfa_store.offset = 0;
1845 if (cur_cfa->reg == dw_stack_pointer_regnum)
1846 cur_cfa->offset = cur_trace->cfa_store.offset;
1848 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1849 offset += -cur_trace->cfa_store.offset;
1850 else
1851 offset = -cur_trace->cfa_store.offset;
1852 break;
1854 /* Rule 12 */
1855 /* With an offset. */
1856 case PLUS:
1857 case MINUS:
1858 case LO_SUM:
1860 unsigned int regno;
1862 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1863 && REG_P (XEXP (XEXP (dest, 0), 0)));
1864 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1865 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1866 offset = -offset;
1868 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1870 if (cur_cfa->reg == regno)
1871 offset -= cur_cfa->offset;
1872 else if (cur_trace->cfa_store.reg == regno)
1873 offset -= cur_trace->cfa_store.offset;
1874 else
1876 gcc_assert (cur_trace->cfa_temp.reg == regno);
1877 offset -= cur_trace->cfa_temp.offset;
1880 break;
1882 /* Rule 13 */
1883 /* Without an offset. */
1884 case REG:
1886 unsigned int regno = dwf_regno (XEXP (dest, 0));
1888 if (cur_cfa->reg == regno)
1889 offset = -cur_cfa->offset;
1890 else if (cur_trace->cfa_store.reg == regno)
1891 offset = -cur_trace->cfa_store.offset;
1892 else
1894 gcc_assert (cur_trace->cfa_temp.reg == regno);
1895 offset = -cur_trace->cfa_temp.offset;
1898 break;
1900 /* Rule 14 */
1901 case POST_INC:
1902 gcc_assert (cur_trace->cfa_temp.reg
1903 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1904 offset = -cur_trace->cfa_temp.offset;
1905 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1906 break;
1908 default:
1909 gcc_unreachable ();
1912 /* Rule 17 */
1913 /* If the source operand of this MEM operation is a memory,
1914 we only care how much stack grew. */
1915 if (MEM_P (src))
1916 break;
1918 if (REG_P (src)
1919 && REGNO (src) != STACK_POINTER_REGNUM
1920 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1921 && dwf_regno (src) == cur_cfa->reg)
1923 /* We're storing the current CFA reg into the stack. */
1925 if (cur_cfa->offset == 0)
1927 /* Rule 19 */
1928 /* If stack is aligned, putting CFA reg into stack means
1929 we can no longer use reg + offset to represent CFA.
1930 Here we use DW_CFA_def_cfa_expression instead. The
1931 result of this expression equals to the original CFA
1932 value. */
1933 if (fde
1934 && fde->stack_realign
1935 && cur_cfa->indirect == 0
1936 && cur_cfa->reg != dw_frame_pointer_regnum)
1938 gcc_assert (fde->drap_reg == cur_cfa->reg);
1940 cur_cfa->indirect = 1;
1941 cur_cfa->reg = dw_frame_pointer_regnum;
1942 cur_cfa->base_offset = offset;
1943 cur_cfa->offset = 0;
1945 fde->drap_reg_saved = 1;
1946 break;
1949 /* If the source register is exactly the CFA, assume
1950 we're saving SP like any other register; this happens
1951 on the ARM. */
1952 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1953 break;
1955 else
1957 /* Otherwise, we'll need to look in the stack to
1958 calculate the CFA. */
1959 rtx x = XEXP (dest, 0);
1961 if (!REG_P (x))
1962 x = XEXP (x, 0);
1963 gcc_assert (REG_P (x));
1965 cur_cfa->reg = dwf_regno (x);
1966 cur_cfa->base_offset = offset;
1967 cur_cfa->indirect = 1;
1968 break;
/* Queue the save.  A span PARALLEL from the target hook describes a
   register whose pieces live in several locations.  */
1972 if (REG_P (src))
1973 span = targetm.dwarf_register_span (src);
1974 else
1975 span = NULL;
1977 if (!span)
1978 queue_reg_save (src, NULL_RTX, offset);
1979 else
1981 /* We have a PARALLEL describing where the contents of SRC live.
1982 Queue register saves for each piece of the PARALLEL. */
1983 HOST_WIDE_INT span_offset = offset;
1985 gcc_assert (GET_CODE (span) == PARALLEL);
1987 const int par_len = XVECLEN (span, 0);
1988 for (int par_index = 0; par_index < par_len; par_index++)
1990 rtx elem = XVECEXP (span, 0, par_index);
1991 queue_reg_save (elem, NULL_RTX, span_offset);
1992 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1995 break;
1997 default:
1998 gcc_unreachable ();
2002 /* Record call frame debugging information for INSN, which either sets
2003 SP or FP (adjusting how we calculate the frame address) or saves a
2004 register to the stack. */
2006 static void
2007 dwarf2out_frame_debug (rtx_insn *insn)
2009 rtx note, n, pat;
2010 bool handled_one = false;
/* REG_CFA_* notes, when present, fully describe the insn's effect on
   the CFI state; HANDLED_ONE records that, so the raw pattern is only
   interpreted (via dwarf2out_frame_debug_expr) when no note handled
   the insn.  */
2012 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2013 switch (REG_NOTE_KIND (note))
2015 case REG_FRAME_RELATED_EXPR:
2016 pat = XEXP (note, 0);
2017 goto do_frame_expr;
2019 case REG_CFA_DEF_CFA:
2020 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2021 handled_one = true;
2022 break;
2024 case REG_CFA_ADJUST_CFA:
2025 n = XEXP (note, 0);
/* A note with no operand refers to the insn's own pattern (its first
   element when it is a PARALLEL).  */
2026 if (n == NULL)
2028 n = PATTERN (insn);
2029 if (GET_CODE (n) == PARALLEL)
2030 n = XVECEXP (n, 0, 0);
2032 dwarf2out_frame_debug_adjust_cfa (n);
2033 handled_one = true;
2034 break;
2036 case REG_CFA_OFFSET:
2037 n = XEXP (note, 0);
2038 if (n == NULL)
2039 n = single_set (insn);
2040 dwarf2out_frame_debug_cfa_offset (n);
2041 handled_one = true;
2042 break;
2044 case REG_CFA_REGISTER:
2045 n = XEXP (note, 0);
2046 if (n == NULL)
2048 n = PATTERN (insn);
2049 if (GET_CODE (n) == PARALLEL)
2050 n = XVECEXP (n, 0, 0);
2052 dwarf2out_frame_debug_cfa_register (n);
2053 handled_one = true;
2054 break;
2056 case REG_CFA_EXPRESSION:
2057 n = XEXP (note, 0);
2058 if (n == NULL)
2059 n = single_set (insn);
2060 dwarf2out_frame_debug_cfa_expression (n);
2061 handled_one = true;
2062 break;
2064 case REG_CFA_RESTORE:
2065 n = XEXP (note, 0);
2066 if (n == NULL)
2068 n = PATTERN (insn);
2069 if (GET_CODE (n) == PARALLEL)
2070 n = XVECEXP (n, 0, 0);
2071 n = XEXP (n, 0);
2073 dwarf2out_frame_debug_cfa_restore (n);
2074 handled_one = true;
2075 break;
2077 case REG_CFA_SET_VDRAP:
2078 n = XEXP (note, 0);
2079 if (REG_P (n))
2081 dw_fde_ref fde = cfun->fde;
2082 if (fde)
2084 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
/* NOTE(review): this inner REG_P test repeats the one above —
   redundant but harmless.  */
2085 if (REG_P (n))
2086 fde->vdrap_reg = dwf_regno (n);
2089 handled_one = true;
2090 break;
2092 case REG_CFA_WINDOW_SAVE:
2093 dwarf2out_frame_debug_cfa_window_save ();
2094 handled_one = true;
2095 break;
2097 case REG_CFA_FLUSH_QUEUE:
2098 /* The actual flush happens elsewhere. */
2099 handled_one = true;
2100 break;
2102 default:
2103 break;
/* No note fully described the insn: fall back to interpreting the raw
   pattern.  REG_FRAME_RELATED_EXPR jumps here with its own pattern.  */
2106 if (!handled_one)
2108 pat = PATTERN (insn);
2109 do_frame_expr:
2110 dwarf2out_frame_debug_expr (pat);
2112 /* Check again. A parallel can save and update the same register.
2113 We could probably check just once, here, but this is safer than
2114 removing the check at the start of the function. */
2115 if (clobbers_queued_reg_save (pat))
2116 dwarf2out_flush_queued_reg_saves ();
2120 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2122 static void
2123 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2125 size_t i, n_old, n_new, n_max;
2126 dw_cfi_ref cfi;
2128 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2129 add_cfi (new_row->cfa_cfi);
2130 else
2132 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2133 if (cfi)
2134 add_cfi (cfi);
2137 n_old = vec_safe_length (old_row->reg_save);
2138 n_new = vec_safe_length (new_row->reg_save);
2139 n_max = MAX (n_old, n_new);
2141 for (i = 0; i < n_max; ++i)
2143 dw_cfi_ref r_old = NULL, r_new = NULL;
2145 if (i < n_old)
2146 r_old = (*old_row->reg_save)[i];
2147 if (i < n_new)
2148 r_new = (*new_row->reg_save)[i];
2150 if (r_old == r_new)
2152 else if (r_new == NULL)
2153 add_cfi_restore (i);
2154 else if (!cfi_equal_p (r_old, r_new))
2155 add_cfi (r_new);
2159 /* Examine CFI and return true if a cfi label and set_loc is needed
2160 beforehand. Even when generating CFI assembler instructions, we
2161 still have to add the cfi to the list so that lookup_cfa_1 works
2162 later on. When -g2 and above we even need to force emitting of
2163 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2164 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2165 and so don't use convert_cfa_to_fb_loc_list. */
2167 static bool
2168 cfi_label_required_p (dw_cfi_ref cfi)
2170 if (!dwarf2out_do_cfi_asm ())
2171 return true;
2173 if (dwarf_version == 2
2174 && debug_info_level > DINFO_LEVEL_TERSE
2175 && (write_symbols == DWARF2_DEBUG
2176 || write_symbols == VMS_AND_DWARF2_DEBUG))
2178 switch (cfi->dw_cfi_opc)
2180 case DW_CFA_def_cfa_offset:
2181 case DW_CFA_def_cfa_offset_sf:
2182 case DW_CFA_def_cfa_register:
2183 case DW_CFA_def_cfa:
2184 case DW_CFA_def_cfa_sf:
2185 case DW_CFA_def_cfa_expression:
2186 case DW_CFA_restore_state:
2187 return true;
2188 default:
2189 return false;
2192 return false;
2195 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2196 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2197 necessary. */
2198 static void
2199 add_cfis_to_fde (void)
2201 dw_fde_ref fde = cfun->fde;
2202 rtx_insn *insn, *next;
2203 /* We always start with a function_begin label. */
2204 bool first = false;
2206 for (insn = get_insns (); insn; insn = next)
2208 next = NEXT_INSN (insn);
2210 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2212 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2213 /* Don't attempt to advance_loc4 between labels
2214 in different sections. */
2215 first = true;
2218 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
/* Scan forward over the run of consecutive CFI notes (stopping at the
   next active insn or section switch) so one label can cover the whole
   group; REQUIRED becomes true if any note in the group needs one.  */
2220 bool required = cfi_label_required_p (NOTE_CFI (insn));
2221 while (next)
2222 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2224 required |= cfi_label_required_p (NOTE_CFI (next));
2225 next = NEXT_INSN (next);
2227 else if (active_insn_p (next)
2228 || (NOTE_P (next) && (NOTE_KIND (next)
2229 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2230 break;
2231 else
2232 next = NEXT_INSN (next);
2233 if (required)
2235 int num = dwarf2out_cfi_label_num;
2236 const char *label = dwarf2out_cfi_label ();
2237 dw_cfi_ref xcfi;
2239 /* Set the location counter to the new label. */
2240 xcfi = new_cfi ();
2241 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2242 : DW_CFA_advance_loc4);
2243 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2244 vec_safe_push (fde->dw_fde_cfi, xcfi);
2246 rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2247 NOTE_LABEL_NUMBER (tmp) = num;
/* Attach every CFI note in the scanned group to the FDE, advancing
   INSN up to the stopping point found above.  */
2252 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2253 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2254 insn = NEXT_INSN (insn);
2256 while (insn != next);
2257 first = false;
2262 /* If LABEL is the start of a trace, then initialize the state of that
2263 trace from CUR_TRACE and CUR_ROW. */
2265 static void
2266 maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
2268 dw_trace_info *ti;
2269 HOST_WIDE_INT args_size;
2271 ti = get_trace_info (start);
2272 gcc_assert (ti != NULL);
2274 if (dump_file)
2276 fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
2277 cur_trace->id, ti->id,
2278 (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2279 (origin ? INSN_UID (origin) : 0));
2282 args_size = cur_trace->end_true_args_size;
2283 if (ti->beg_row == NULL)
2285 /* This is the first time we've encountered this trace. Propagate
2286 state across the edge and push the trace onto the work list. */
2287 ti->beg_row = copy_cfi_row (cur_row);
2288 ti->beg_true_args_size = args_size;
2290 ti->cfa_store = cur_trace->cfa_store;
2291 ti->cfa_temp = cur_trace->cfa_temp;
2292 ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
2294 trace_work_list.safe_push (ti);
2296 if (dump_file)
2297 fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2299 else
2302 /* We ought to have the same state incoming to a given trace no
2303 matter how we arrive at the trace. Anything else means we've
2304 got some kind of optimization error. */
2305 gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
2307 /* The args_size is allowed to conflict if it isn't actually used. */
2308 if (ti->beg_true_args_size != args_size)
2309 ti->args_size_undefined = true;
2313 /* Similarly, but handle the args_size and CFA reset across EH
2314 and non-local goto edges. */
2316 static void
2317 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2319 HOST_WIDE_INT save_args_size, delta;
2320 dw_cfa_location save_cfa;
2322 save_args_size = cur_trace->end_true_args_size;
2323 if (save_args_size == 0)
2325 maybe_record_trace_start (start, origin);
2326 return;
2329 delta = -save_args_size;
2330 cur_trace->end_true_args_size = 0;
2332 save_cfa = cur_row->cfa;
2333 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2335 /* Convert a change in args_size (always a positive in the
2336 direction of stack growth) to a change in stack pointer. */
2337 if (!STACK_GROWS_DOWNWARD)
2338 delta = -delta;
2340 cur_row->cfa.offset += delta;
2343 maybe_record_trace_start (start, origin);
2345 cur_trace->end_true_args_size = save_args_size;
2346 cur_row->cfa = save_cfa;
2349 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2350 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2352 static void
2353 create_trace_edges (rtx_insn *insn)
2355 rtx tmp;
2356 int i, n;
2358 if (JUMP_P (insn))
2360 rtx_jump_table_data *table;
2362 if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2363 return;
2365 if (tablejump_p (insn, NULL, &table))
2367 rtvec vec = table->get_labels ();
2369 n = GET_NUM_ELEM (vec);
2370 for (i = 0; i < n; ++i)
2372 rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
2373 maybe_record_trace_start (lab, insn);
2376 else if (computed_jump_p (insn))
2378 for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
2379 maybe_record_trace_start (lab->insn (), insn);
2381 else if (returnjump_p (insn))
2383 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2385 n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2386 for (i = 0; i < n; ++i)
2388 rtx_insn *lab =
2389 as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
2390 maybe_record_trace_start (lab, insn);
2393 else
2395 rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
2396 gcc_assert (lab != NULL);
2397 maybe_record_trace_start (lab, insn);
2400 else if (CALL_P (insn))
2402 /* Sibling calls don't have edges inside this function. */
2403 if (SIBLING_CALL_P (insn))
2404 return;
2406 /* Process non-local goto edges. */
2407 if (can_nonlocal_goto (insn))
2408 for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
2409 lab;
2410 lab = lab->next ())
2411 maybe_record_trace_start_abnormal (lab->insn (), insn);
2413 else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2415 int i, n = seq->len ();
2416 for (i = 0; i < n; ++i)
2417 create_trace_edges (seq->insn (i));
2418 return;
2421 /* Process EH edges. */
2422 if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2424 eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2425 if (lp)
2426 maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2430 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2432 static void
2433 scan_insn_after (rtx_insn *insn)
2435 if (RTX_FRAME_RELATED_P (insn))
2436 dwarf2out_frame_debug (insn);
2437 notice_args_size (insn);
2440 /* Scan the trace beginning at INSN and create the CFI notes for the
2441 instructions therein. */
2443 static void
2444 scan_trace (dw_trace_info *trace)
2446 rtx_insn *prev, *insn = trace->head;
2447 dw_cfa_location this_cfa;
2449 if (dump_file)
2450 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2451 trace->id, rtx_name[(int) GET_CODE (insn)],
2452 INSN_UID (insn));
2454 trace->end_row = copy_cfi_row (trace->beg_row);
2455 trace->end_true_args_size = trace->beg_true_args_size;
2457 cur_trace = trace;
2458 cur_row = trace->end_row;
2460 this_cfa = cur_row->cfa;
2461 cur_cfa = &this_cfa;
2463 for (prev = insn, insn = NEXT_INSN (insn);
2464 insn;
2465 prev = insn, insn = NEXT_INSN (insn))
2467 rtx_insn *control;
2469 /* Do everything that happens "before" the insn. */
2470 add_cfi_insn = prev;
2472 /* Notice the end of a trace. */
2473 if (BARRIER_P (insn))
2475 /* Don't bother saving the unneeded queued registers at all. */
2476 queued_reg_saves.truncate (0);
2477 break;
2479 if (save_point_p (insn))
2481 /* Propagate across fallthru edges. */
2482 dwarf2out_flush_queued_reg_saves ();
2483 maybe_record_trace_start (insn, NULL);
2484 break;
2487 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2488 continue;
2490 /* Handle all changes to the row state. Sequences require special
2491 handling for the positioning of the notes. */
2492 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2494 rtx_insn *elt;
2495 int i, n = pat->len ();
2497 control = pat->insn (0);
2498 if (can_throw_internal (control))
2499 notice_eh_throw (control);
2500 dwarf2out_flush_queued_reg_saves ();
2502 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2504 /* ??? Hopefully multiple delay slots are not annulled. */
2505 gcc_assert (n == 2);
2506 gcc_assert (!RTX_FRAME_RELATED_P (control));
2507 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2509 elt = pat->insn (1);
2511 if (INSN_FROM_TARGET_P (elt))
2513 HOST_WIDE_INT restore_args_size;
2514 cfi_vec save_row_reg_save;
2516 /* If ELT is an instruction from target of an annulled
2517 branch, the effects are for the target only and so
2518 the args_size and CFA along the current path
2519 shouldn't change. */
2520 add_cfi_insn = NULL;
2521 restore_args_size = cur_trace->end_true_args_size;
2522 cur_cfa = &cur_row->cfa;
2523 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2525 scan_insn_after (elt);
2527 /* ??? Should we instead save the entire row state? */
2528 gcc_assert (!queued_reg_saves.length ());
2530 create_trace_edges (control);
2532 cur_trace->end_true_args_size = restore_args_size;
2533 cur_row->cfa = this_cfa;
2534 cur_row->reg_save = save_row_reg_save;
2535 cur_cfa = &this_cfa;
2537 else
2539 /* If ELT is a annulled branch-taken instruction (i.e.
2540 executed only when branch is not taken), the args_size
2541 and CFA should not change through the jump. */
2542 create_trace_edges (control);
2544 /* Update and continue with the trace. */
2545 add_cfi_insn = insn;
2546 scan_insn_after (elt);
2547 def_cfa_1 (&this_cfa);
2549 continue;
2552 /* The insns in the delay slot should all be considered to happen
2553 "before" a call insn. Consider a call with a stack pointer
2554 adjustment in the delay slot. The backtrace from the callee
2555 should include the sp adjustment. Unfortunately, that leaves
2556 us with an unavoidable unwinding error exactly at the call insn
2557 itself. For jump insns we'd prefer to avoid this error by
2558 placing the notes after the sequence. */
2559 if (JUMP_P (control))
2560 add_cfi_insn = insn;
2562 for (i = 1; i < n; ++i)
2564 elt = pat->insn (i);
2565 scan_insn_after (elt);
2568 /* Make sure any register saves are visible at the jump target. */
2569 dwarf2out_flush_queued_reg_saves ();
2570 any_cfis_emitted = false;
2572 /* However, if there is some adjustment on the call itself, e.g.
2573 a call_pop, that action should be considered to happen after
2574 the call returns. */
2575 add_cfi_insn = insn;
2576 scan_insn_after (control);
2578 else
2580 /* Flush data before calls and jumps, and of course if necessary. */
2581 if (can_throw_internal (insn))
2583 notice_eh_throw (insn);
2584 dwarf2out_flush_queued_reg_saves ();
2586 else if (!NONJUMP_INSN_P (insn)
2587 || clobbers_queued_reg_save (insn)
2588 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2589 dwarf2out_flush_queued_reg_saves ();
2590 any_cfis_emitted = false;
2592 add_cfi_insn = insn;
2593 scan_insn_after (insn);
2594 control = insn;
2597 /* Between frame-related-p and args_size we might have otherwise
2598 emitted two cfa adjustments. Do it now. */
2599 def_cfa_1 (&this_cfa);
2601 /* Minimize the number of advances by emitting the entire queue
2602 once anything is emitted. */
2603 if (any_cfis_emitted
2604 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2605 dwarf2out_flush_queued_reg_saves ();
2607 /* Note that a test for control_flow_insn_p does exactly the
2608 same tests as are done to actually create the edges. So
2609 always call the routine and let it not create edges for
2610 non-control-flow insns. */
2611 create_trace_edges (control);
2614 add_cfi_insn = NULL;
2615 cur_row = NULL;
2616 cur_trace = NULL;
2617 cur_cfa = NULL;
2620 /* Scan the function and create the initial set of CFI notes. */
2622 static void
2623 create_cfi_notes (void)
2625 dw_trace_info *ti;
2627 gcc_checking_assert (!queued_reg_saves.exists ());
2628 gcc_checking_assert (!trace_work_list.exists ());
2630 /* Always begin at the entry trace. */
2631 ti = &trace_info[0];
2632 scan_trace (ti);
2634 while (!trace_work_list.is_empty ())
2636 ti = trace_work_list.pop ();
2637 scan_trace (ti);
2640 queued_reg_saves.release ();
2641 trace_work_list.release ();
2644 /* Return the insn before the first NOTE_INSN_CFI after START. */
2646 static rtx_insn *
2647 before_next_cfi_note (rtx_insn *start)
2649 rtx_insn *prev = start;
2650 while (start)
2652 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2653 return prev;
2654 prev = start;
2655 start = NEXT_INSN (start);
2657 gcc_unreachable ();
2660 /* Insert CFI notes between traces to properly change state between them. */
2662 static void
2663 connect_traces (void)
2665 unsigned i, n = trace_info.length ();
2666 dw_trace_info *prev_ti, *ti;
2668 /* ??? Ideally, we should have both queued and processed every trace.
2669 However the current representation of constant pools on various targets
2670 is indistinguishable from unreachable code. Assume for the moment that
2671 we can simply skip over such traces. */
2672 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2673 these are not "real" instructions, and should not be considered.
2674 This could be generically useful for tablejump data as well. */
2675 /* Remove all unprocessed traces from the list. */
2676 for (i = n - 1; i > 0; --i)
2678 ti = &trace_info[i];
2679 if (ti->beg_row == NULL)
2681 trace_info.ordered_remove (i);
2682 n -= 1;
2684 else
2685 gcc_assert (ti->end_row != NULL);
2688 /* Work from the end back to the beginning. This lets us easily insert
2689 remember/restore_state notes in the correct order wrt other notes. */
2690 prev_ti = &trace_info[n - 1];
2691 for (i = n - 1; i > 0; --i)
2693 dw_cfi_row *old_row;
2695 ti = prev_ti;
2696 prev_ti = &trace_info[i - 1];
2698 add_cfi_insn = ti->head;
2700 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2701 for the portion of the function in the alternate text
2702 section. The row state at the very beginning of that
2703 new FDE will be exactly the row state from the CIE. */
2704 if (ti->switch_sections)
2705 old_row = cie_cfi_row;
2706 else
2708 old_row = prev_ti->end_row;
2709 /* If there's no change from the previous end state, fine. */
2710 if (cfi_row_equal_p (old_row, ti->beg_row))
2712 /* Otherwise check for the common case of sharing state with
2713 the beginning of an epilogue, but not the end. Insert
2714 remember/restore opcodes in that case. */
2715 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2717 dw_cfi_ref cfi;
2719 /* Note that if we blindly insert the remember at the
2720 start of the trace, we can wind up increasing the
2721 size of the unwind info due to extra advance opcodes.
2722 Instead, put the remember immediately before the next
2723 state change. We know there must be one, because the
2724 state at the beginning and head of the trace differ. */
2725 add_cfi_insn = before_next_cfi_note (prev_ti->head);
2726 cfi = new_cfi ();
2727 cfi->dw_cfi_opc = DW_CFA_remember_state;
2728 add_cfi (cfi);
2730 add_cfi_insn = ti->head;
2731 cfi = new_cfi ();
2732 cfi->dw_cfi_opc = DW_CFA_restore_state;
2733 add_cfi (cfi);
2735 old_row = prev_ti->beg_row;
2737 /* Otherwise, we'll simply change state from the previous end. */
2740 change_cfi_row (old_row, ti->beg_row);
2742 if (dump_file && add_cfi_insn != ti->head)
2744 rtx_insn *note;
2746 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2747 prev_ti->id, ti->id);
2749 note = ti->head;
2752 note = NEXT_INSN (note);
2753 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2754 output_cfi_directive (dump_file, NOTE_CFI (note));
2756 while (note != add_cfi_insn);
2760 /* Connect args_size between traces that have can_throw_internal insns. */
2761 if (cfun->eh->lp_array)
2763 HOST_WIDE_INT prev_args_size = 0;
2765 for (i = 0; i < n; ++i)
2767 ti = &trace_info[i];
2769 if (ti->switch_sections)
2770 prev_args_size = 0;
2771 if (ti->eh_head == NULL)
2772 continue;
2773 gcc_assert (!ti->args_size_undefined);
2775 if (ti->beg_delay_args_size != prev_args_size)
2777 /* ??? Search back to previous CFI note. */
2778 add_cfi_insn = PREV_INSN (ti->eh_head);
2779 add_cfi_args_size (ti->beg_delay_args_size);
2782 prev_args_size = ti->end_delay_args_size;
2787 /* Set up the pseudo-cfg of instruction traces, as described at the
2788 block comment at the top of the file. */
2790 static void
2791 create_pseudo_cfg (void)
2793 bool saw_barrier, switch_sections;
2794 dw_trace_info ti;
2795 rtx_insn *insn;
2796 unsigned i;
2798 /* The first trace begins at the start of the function,
2799 and begins with the CIE row state. */
2800 trace_info.create (16);
2801 memset (&ti, 0, sizeof (ti));
2802 ti.head = get_insns ();
2803 ti.beg_row = cie_cfi_row;
2804 ti.cfa_store = cie_cfi_row->cfa;
2805 ti.cfa_temp.reg = INVALID_REGNUM;
2806 trace_info.quick_push (ti);
2808 if (cie_return_save)
2809 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2811 /* Walk all the insns, collecting start of trace locations. */
2812 saw_barrier = false;
2813 switch_sections = false;
2814 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2816 if (BARRIER_P (insn))
2817 saw_barrier = true;
2818 else if (NOTE_P (insn)
2819 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2821 /* We should have just seen a barrier. */
2822 gcc_assert (saw_barrier);
2823 switch_sections = true;
2825 /* Watch out for save_point notes between basic blocks.
2826 In particular, a note after a barrier. Do not record these,
2827 delaying trace creation until the label. */
2828 else if (save_point_p (insn)
2829 && (LABEL_P (insn) || !saw_barrier))
2831 memset (&ti, 0, sizeof (ti));
2832 ti.head = insn;
2833 ti.switch_sections = switch_sections;
2834 ti.id = trace_info.length ();
2835 trace_info.safe_push (ti);
2837 saw_barrier = false;
2838 switch_sections = false;
2842 /* Create the trace index after we've finished building trace_info,
2843 avoiding stale pointer problems due to reallocation. */
2844 trace_index
2845 = new hash_table<trace_info_hasher> (trace_info.length ());
2846 dw_trace_info *tp;
2847 FOR_EACH_VEC_ELT (trace_info, i, tp)
2849 dw_trace_info **slot;
2851 if (dump_file)
2852 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2853 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2854 tp->switch_sections ? " (section switch)" : "");
2856 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2857 gcc_assert (*slot == NULL);
2858 *slot = tp;
2862 /* Record the initial position of the return address. RTL is
2863 INCOMING_RETURN_ADDR_RTX. */
2865 static void
2866 initial_return_save (rtx rtl)
2868 unsigned int reg = INVALID_REGNUM;
2869 HOST_WIDE_INT offset = 0;
2871 switch (GET_CODE (rtl))
2873 case REG:
2874 /* RA is in a register. */
2875 reg = dwf_regno (rtl);
2876 break;
2878 case MEM:
2879 /* RA is on the stack. */
2880 rtl = XEXP (rtl, 0);
2881 switch (GET_CODE (rtl))
2883 case REG:
2884 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2885 offset = 0;
2886 break;
2888 case PLUS:
2889 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2890 offset = INTVAL (XEXP (rtl, 1));
2891 break;
2893 case MINUS:
2894 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2895 offset = -INTVAL (XEXP (rtl, 1));
2896 break;
2898 default:
2899 gcc_unreachable ();
2902 break;
2904 case PLUS:
2905 /* The return address is at some offset from any value we can
2906 actually load. For instance, on the SPARC it is in %i7+8. Just
2907 ignore the offset for now; it doesn't matter for unwinding frames. */
2908 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2909 initial_return_save (XEXP (rtl, 0));
2910 return;
2912 default:
2913 gcc_unreachable ();
2916 if (reg != DWARF_FRAME_RETURN_COLUMN)
2918 if (reg != INVALID_REGNUM)
2919 record_reg_saved_in_reg (rtl, pc_rtx);
2920 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2924 static void
2925 create_cie_data (void)
2927 dw_cfa_location loc;
2928 dw_trace_info cie_trace;
2930 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2932 memset (&cie_trace, 0, sizeof (cie_trace));
2933 cur_trace = &cie_trace;
2935 add_cfi_vec = &cie_cfi_vec;
2936 cie_cfi_row = cur_row = new_cfi_row ();
2938 /* On entry, the Canonical Frame Address is at SP. */
2939 memset (&loc, 0, sizeof (loc));
2940 loc.reg = dw_stack_pointer_regnum;
2941 loc.offset = INCOMING_FRAME_SP_OFFSET;
2942 def_cfa_1 (&loc);
2944 if (targetm.debug_unwind_info () == UI_DWARF2
2945 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2947 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2949 /* For a few targets, we have the return address incoming into a
2950 register, but choose a different return column. This will result
2951 in a DW_CFA_register for the return, and an entry in
2952 regs_saved_in_regs to match. If the target later stores that
2953 return address register to the stack, we want to be able to emit
2954 the DW_CFA_offset against the return column, not the intermediate
2955 save register. Save the contents of regs_saved_in_regs so that
2956 we can re-initialize it at the start of each function. */
2957 switch (cie_trace.regs_saved_in_regs.length ())
2959 case 0:
2960 break;
2961 case 1:
2962 cie_return_save = ggc_alloc<reg_saved_in_data> ();
2963 *cie_return_save = cie_trace.regs_saved_in_regs[0];
2964 cie_trace.regs_saved_in_regs.release ();
2965 break;
2966 default:
2967 gcc_unreachable ();
2971 add_cfi_vec = NULL;
2972 cur_row = NULL;
2973 cur_trace = NULL;
2976 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2977 state at each location within the function. These notes will be
2978 emitted during pass_final. */
2980 static unsigned int
2981 execute_dwarf2_frame (void)
2983 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
2984 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2986 /* The first time we're called, compute the incoming frame state. */
2987 if (cie_cfi_vec == NULL)
2988 create_cie_data ();
2990 dwarf2out_alloc_current_fde ();
2992 create_pseudo_cfg ();
2994 /* Do the work. */
2995 create_cfi_notes ();
2996 connect_traces ();
2997 add_cfis_to_fde ();
2999 /* Free all the data we allocated. */
3001 size_t i;
3002 dw_trace_info *ti;
3004 FOR_EACH_VEC_ELT (trace_info, i, ti)
3005 ti->regs_saved_in_regs.release ();
3007 trace_info.release ();
3009 delete trace_index;
3010 trace_index = NULL;
3012 return 0;
3015 /* Convert a DWARF call frame info. operation to its string name */
3017 static const char *
3018 dwarf_cfi_name (unsigned int cfi_opc)
3020 const char *name = get_DW_CFA_name (cfi_opc);
3022 if (name != NULL)
3023 return name;
3025 return "DW_CFA_<unknown>";
3028 /* This routine will generate the correct assembly data for a location
3029 description based on a cfi entry with a complex address. */
3031 static void
3032 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3034 dw_loc_descr_ref loc;
3035 unsigned long size;
3037 if (cfi->dw_cfi_opc == DW_CFA_expression)
3039 unsigned r =
3040 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3041 dw2_asm_output_data (1, r, NULL);
3042 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3044 else
3045 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3047 /* Output the size of the block. */
3048 size = size_of_locs (loc);
3049 dw2_asm_output_data_uleb128 (size, NULL);
3051 /* Now output the operations themselves. */
3052 output_loc_sequence (loc, for_eh);
3055 /* Similar, but used for .cfi_escape. */
3057 static void
3058 output_cfa_loc_raw (dw_cfi_ref cfi)
3060 dw_loc_descr_ref loc;
3061 unsigned long size;
3063 if (cfi->dw_cfi_opc == DW_CFA_expression)
3065 unsigned r =
3066 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3067 fprintf (asm_out_file, "%#x,", r);
3068 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3070 else
3071 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3073 /* Output the size of the block. */
3074 size = size_of_locs (loc);
3075 dw2_asm_output_data_uleb128_raw (size);
3076 fputc (',', asm_out_file);
3078 /* Now output the operations themselves. */
3079 output_loc_sequence_raw (loc);
3082 /* Output a Call Frame Information opcode and its operand(s). */
3084 void
3085 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3087 unsigned long r;
3088 HOST_WIDE_INT off;
3090 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3091 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3092 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3093 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3094 ((unsigned HOST_WIDE_INT)
3095 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3096 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3098 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3099 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3100 "DW_CFA_offset, column %#lx", r);
3101 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3102 dw2_asm_output_data_uleb128 (off, NULL);
3104 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3106 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3107 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3108 "DW_CFA_restore, column %#lx", r);
3110 else
3112 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3113 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3115 switch (cfi->dw_cfi_opc)
3117 case DW_CFA_set_loc:
3118 if (for_eh)
3119 dw2_asm_output_encoded_addr_rtx (
3120 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3121 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3122 false, NULL);
3123 else
3124 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3125 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3126 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3127 break;
3129 case DW_CFA_advance_loc1:
3130 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3131 fde->dw_fde_current_label, NULL);
3132 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3133 break;
3135 case DW_CFA_advance_loc2:
3136 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3137 fde->dw_fde_current_label, NULL);
3138 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3139 break;
3141 case DW_CFA_advance_loc4:
3142 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3143 fde->dw_fde_current_label, NULL);
3144 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3145 break;
3147 case DW_CFA_MIPS_advance_loc8:
3148 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3149 fde->dw_fde_current_label, NULL);
3150 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3151 break;
3153 case DW_CFA_offset_extended:
3154 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3155 dw2_asm_output_data_uleb128 (r, NULL);
3156 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3157 dw2_asm_output_data_uleb128 (off, NULL);
3158 break;
3160 case DW_CFA_def_cfa:
3161 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3162 dw2_asm_output_data_uleb128 (r, NULL);
3163 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3164 break;
3166 case DW_CFA_offset_extended_sf:
3167 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3168 dw2_asm_output_data_uleb128 (r, NULL);
3169 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3170 dw2_asm_output_data_sleb128 (off, NULL);
3171 break;
3173 case DW_CFA_def_cfa_sf:
3174 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3175 dw2_asm_output_data_uleb128 (r, NULL);
3176 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3177 dw2_asm_output_data_sleb128 (off, NULL);
3178 break;
3180 case DW_CFA_restore_extended:
3181 case DW_CFA_undefined:
3182 case DW_CFA_same_value:
3183 case DW_CFA_def_cfa_register:
3184 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3185 dw2_asm_output_data_uleb128 (r, NULL);
3186 break;
3188 case DW_CFA_register:
3189 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3190 dw2_asm_output_data_uleb128 (r, NULL);
3191 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3192 dw2_asm_output_data_uleb128 (r, NULL);
3193 break;
3195 case DW_CFA_def_cfa_offset:
3196 case DW_CFA_GNU_args_size:
3197 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3198 break;
3200 case DW_CFA_def_cfa_offset_sf:
3201 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3202 dw2_asm_output_data_sleb128 (off, NULL);
3203 break;
3205 case DW_CFA_GNU_window_save:
3206 break;
3208 case DW_CFA_def_cfa_expression:
3209 case DW_CFA_expression:
3210 output_cfa_loc (cfi, for_eh);
3211 break;
3213 case DW_CFA_GNU_negative_offset_extended:
3214 /* Obsoleted by DW_CFA_offset_extended_sf. */
3215 gcc_unreachable ();
3217 default:
3218 break;
3223 /* Similar, but do it via assembler directives instead. */
3225 void
3226 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3228 unsigned long r, r2;
3230 switch (cfi->dw_cfi_opc)
3232 case DW_CFA_advance_loc:
3233 case DW_CFA_advance_loc1:
3234 case DW_CFA_advance_loc2:
3235 case DW_CFA_advance_loc4:
3236 case DW_CFA_MIPS_advance_loc8:
3237 case DW_CFA_set_loc:
3238 /* Should only be created in a code path not followed when emitting
3239 via directives. The assembler is going to take care of this for
3240 us. But this routines is also used for debugging dumps, so
3241 print something. */
3242 gcc_assert (f != asm_out_file);
3243 fprintf (f, "\t.cfi_advance_loc\n");
3244 break;
3246 case DW_CFA_offset:
3247 case DW_CFA_offset_extended:
3248 case DW_CFA_offset_extended_sf:
3249 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3250 fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3251 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3252 break;
3254 case DW_CFA_restore:
3255 case DW_CFA_restore_extended:
3256 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3257 fprintf (f, "\t.cfi_restore %lu\n", r);
3258 break;
3260 case DW_CFA_undefined:
3261 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3262 fprintf (f, "\t.cfi_undefined %lu\n", r);
3263 break;
3265 case DW_CFA_same_value:
3266 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3267 fprintf (f, "\t.cfi_same_value %lu\n", r);
3268 break;
3270 case DW_CFA_def_cfa:
3271 case DW_CFA_def_cfa_sf:
3272 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3273 fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3274 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3275 break;
3277 case DW_CFA_def_cfa_register:
3278 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3279 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3280 break;
3282 case DW_CFA_register:
3283 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3284 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3285 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3286 break;
3288 case DW_CFA_def_cfa_offset:
3289 case DW_CFA_def_cfa_offset_sf:
3290 fprintf (f, "\t.cfi_def_cfa_offset "
3291 HOST_WIDE_INT_PRINT_DEC"\n",
3292 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3293 break;
3295 case DW_CFA_remember_state:
3296 fprintf (f, "\t.cfi_remember_state\n");
3297 break;
3298 case DW_CFA_restore_state:
3299 fprintf (f, "\t.cfi_restore_state\n");
3300 break;
3302 case DW_CFA_GNU_args_size:
3303 if (f == asm_out_file)
3305 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3306 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3307 if (flag_debug_asm)
3308 fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
3309 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3310 fputc ('\n', f);
3312 else
3314 fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
3315 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3317 break;
3319 case DW_CFA_GNU_window_save:
3320 fprintf (f, "\t.cfi_window_save\n");
3321 break;
3323 case DW_CFA_def_cfa_expression:
3324 if (f != asm_out_file)
3326 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3327 break;
3329 /* FALLTHRU */
3330 case DW_CFA_expression:
3331 if (f != asm_out_file)
3333 fprintf (f, "\t.cfi_cfa_expression ...\n");
3334 break;
3336 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3337 output_cfa_loc_raw (cfi);
3338 fputc ('\n', f);
3339 break;
3341 default:
3342 gcc_unreachable ();
3346 void
3347 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3349 if (dwarf2out_do_cfi_asm ())
3350 output_cfi_directive (asm_out_file, cfi);
3353 static void
3354 dump_cfi_row (FILE *f, dw_cfi_row *row)
3356 dw_cfi_ref cfi;
3357 unsigned i;
3359 cfi = row->cfa_cfi;
3360 if (!cfi)
3362 dw_cfa_location dummy;
3363 memset (&dummy, 0, sizeof (dummy));
3364 dummy.reg = INVALID_REGNUM;
3365 cfi = def_cfa_0 (&dummy, &row->cfa);
3367 output_cfi_directive (f, cfi);
3369 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3370 if (cfi)
3371 output_cfi_directive (f, cfi);
3374 void debug_cfi_row (dw_cfi_row *row);
3376 void
3377 debug_cfi_row (dw_cfi_row *row)
3379 dump_cfi_row (stderr, row);
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   It is computed and cached by dwarf2out_do_cfi_asm, and a positive
   value also short-circuits dwarf2out_do_frame.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3387 /* Decide whether we want to emit frame unwind information for the current
3388 translation unit. */
3390 bool
3391 dwarf2out_do_frame (void)
3393 /* We want to emit correct CFA location expressions or lists, so we
3394 have to return true if we're going to output debug info, even if
3395 we're not going to output frame or unwind info. */
3396 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3397 return true;
3399 if (saved_do_cfi_asm > 0)
3400 return true;
3402 if (targetm.debug_unwind_info () == UI_DWARF2)
3403 return true;
3405 if ((flag_unwind_tables || flag_exceptions)
3406 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3407 return true;
3409 return false;
3412 /* Decide whether to emit frame unwind via assembler directives. */
3414 bool
3415 dwarf2out_do_cfi_asm (void)
3417 int enc;
3419 if (saved_do_cfi_asm != 0)
3420 return saved_do_cfi_asm > 0;
3422 /* Assume failure for a moment. */
3423 saved_do_cfi_asm = -1;
3425 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3426 return false;
3427 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3428 return false;
3430 /* Make sure the personality encoding is one the assembler can support.
3431 In particular, aligned addresses can't be handled. */
3432 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3433 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3434 return false;
3435 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3436 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3437 return false;
3439 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3440 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3441 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3442 && !flag_unwind_tables && !flag_exceptions
3443 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3444 return false;
3446 /* Success! */
3447 saved_do_cfi_asm = 1;
3448 return true;
namespace {

/* Pass descriptor for the dwarf2 CFI-note pass; accounted to TV_FINAL,
   with no required/provided properties or todo flags.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass wrapper around execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Gate: run only when the generic dwarf2 CFI machinery applies.  */
bool
pass_dwarf2_frame::gate (function *)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3496 rtl_opt_pass *
3497 make_pass_dwarf2_frame (gcc::context *ctxt)
3499 return new pass_dwarf2_frame (ctxt);
3502 #include "gt-dwarf2cfi.h"