* config/msp430/msp430.c (msp430_asm_integer): Support addition
[official-gcc.git] / gcc / dwarf2cfi.c
blobac2196e990176f48c8c43422cecea84ee3041180
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "hash-set.h"
28 #include "vec.h"
29 #include "input.h"
30 #include "alias.h"
31 #include "symtab.h"
32 #include "inchash.h"
33 #include "tree.h"
34 #include "stor-layout.h"
35 #include "hard-reg-set.h"
36 #include "function.h"
37 #include "cfgbuild.h"
38 #include "dwarf2.h"
39 #include "dwarf2out.h"
40 #include "dwarf2asm.h"
41 #include "ggc.h"
42 #include "hash-table.h"
43 #include "tm_p.h"
44 #include "target.h"
45 #include "common/common-target.h"
46 #include "tree-pass.h"
48 #include "except.h" /* expand_builtin_dwarf_sp_column */
49 #include "hashtab.h"
50 #include "statistics.h"
51 #include "insn-config.h"
52 #include "expmed.h"
53 #include "dojump.h"
54 #include "explow.h"
55 #include "calls.h"
56 #include "emit-rtl.h"
57 #include "varasm.h"
58 #include "stmt.h"
59 #include "expr.h" /* init_return_column_size */
60 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
61 #include "output.h" /* asm_out_file */
62 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
65 /* ??? Poison these here until it can be done generically. They've been
66 totally replaced in this file; make sure it stays that way. */
67 #undef DWARF2_UNWIND_INFO
68 #undef DWARF2_FRAME_INFO
69 #if (GCC_VERSION >= 3000)
70 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
71 #endif
73 #ifndef INCOMING_RETURN_ADDR_RTX
74 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
75 #endif
77 /* Maximum size (in bytes) of an artificially generated label. */
78 #define MAX_ARTIFICIAL_LABEL_BYTES 30
80 /* A collected description of an entire row of the abstract CFI table. */
81 typedef struct GTY(()) dw_cfi_row_struct
83 /* The expression that computes the CFA, expressed in two different ways.
84 The CFA member for the simple cases, and the full CFI expression for
85 the complex cases. The later will be a DW_CFA_cfa_expression. */
86 dw_cfa_location cfa;
87 dw_cfi_ref cfa_cfi;
89 /* The expressions for any register column that is saved. */
90 cfi_vec reg_save;
91 } dw_cfi_row;
93 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
94 typedef struct GTY(()) reg_saved_in_data_struct {
95 rtx orig_reg;
96 rtx saved_in_reg;
97 } reg_saved_in_data;
100 /* Since we no longer have a proper CFG, we're going to create a facsimile
101 of one on the fly while processing the frame-related insns.
103 We create dw_trace_info structures for each extended basic block beginning
104 and ending at a "save point". Save points are labels, barriers, certain
105 notes, and of course the beginning and end of the function.
107 As we encounter control transfer insns, we propagate the "current"
108 row state across the edges to the starts of traces. When checking is
109 enabled, we validate that we propagate the same data from all sources.
111 All traces are members of the TRACE_INFO array, in the order in which
112 they appear in the instruction stream.
114 All save points are present in the TRACE_INDEX hash, mapping the insn
115 starting a trace to the dw_trace_info describing the trace. */
117 typedef struct
119 /* The insn that begins the trace. */
120 rtx_insn *head;
122 /* The row state at the beginning and end of the trace. */
123 dw_cfi_row *beg_row, *end_row;
125 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
126 while scanning insns. However, the args_size value is irrelevant at
127 any point except can_throw_internal_p insns. Therefore the "delay"
128 sizes the values that must actually be emitted for this trace. */
129 HOST_WIDE_INT beg_true_args_size, end_true_args_size;
130 HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;
132 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
133 rtx_insn *eh_head;
135 /* The following variables contain data used in interpreting frame related
136 expressions. These are not part of the "real" row state as defined by
137 Dwarf, but it seems like they need to be propagated into a trace in case
138 frame related expressions have been sunk. */
139 /* ??? This seems fragile. These variables are fragments of a larger
140 expression. If we do not keep the entire expression together, we risk
141 not being able to put it together properly. Consider forcing targets
142 to generate self-contained expressions and dropping all of the magic
143 interpretation code in this file. Or at least refusing to shrink wrap
144 any frame related insn that doesn't contain a complete expression. */
146 /* The register used for saving registers to the stack, and its offset
147 from the CFA. */
148 dw_cfa_location cfa_store;
150 /* A temporary register holding an integral value used in adjusting SP
151 or setting up the store_reg. The "offset" field holds the integer
152 value, not an offset. */
153 dw_cfa_location cfa_temp;
155 /* A set of registers saved in other registers. This is the inverse of
156 the row->reg_save info, if the entry is a DW_CFA_register. This is
157 implemented as a flat array because it normally contains zero or 1
158 entry, depending on the target. IA-64 is the big spender here, using
159 a maximum of 5 entries. */
160 vec<reg_saved_in_data> regs_saved_in_regs;
162 /* An identifier for this trace. Used only for debugging dumps. */
163 unsigned id;
165 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
166 bool switch_sections;
168 /* True if we've seen different values incoming to beg_true_args_size. */
169 bool args_size_undefined;
170 } dw_trace_info;
173 typedef dw_trace_info *dw_trace_info_ref;
176 /* Hashtable helpers. */
178 struct trace_info_hasher : typed_noop_remove <dw_trace_info>
180 typedef dw_trace_info *value_type;
181 typedef dw_trace_info *compare_type;
182 static inline hashval_t hash (const dw_trace_info *);
183 static inline bool equal (const dw_trace_info *, const dw_trace_info *);
186 inline hashval_t
187 trace_info_hasher::hash (const dw_trace_info *ti)
189 return INSN_UID (ti->head);
192 inline bool
193 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
195 return a->head == b->head;
199 /* The variables making up the pseudo-cfg, as described above. */
200 static vec<dw_trace_info> trace_info;
201 static vec<dw_trace_info_ref> trace_work_list;
202 static hash_table<trace_info_hasher> *trace_index;
204 /* A vector of call frame insns for the CIE. */
205 cfi_vec cie_cfi_vec;
207 /* The state of the first row of the FDE table, which includes the
208 state provided by the CIE. */
209 static GTY(()) dw_cfi_row *cie_cfi_row;
211 static GTY(()) reg_saved_in_data *cie_return_save;
213 static GTY(()) unsigned long dwarf2out_cfi_label_num;
215 /* The insn after which a new CFI note should be emitted. */
216 static rtx_insn *add_cfi_insn;
218 /* When non-null, add_cfi will add the CFI to this vector. */
219 static cfi_vec *add_cfi_vec;
221 /* The current instruction trace. */
222 static dw_trace_info *cur_trace;
224 /* The current, i.e. most recently generated, row of the CFI table. */
225 static dw_cfi_row *cur_row;
227 /* A copy of the current CFA, for use during the processing of a
228 single insn. */
229 static dw_cfa_location *cur_cfa;
231 /* We delay emitting a register save until either (a) we reach the end
232 of the prologue or (b) the register is clobbered. This clusters
233 register saves so that there are fewer pc advances. */
235 typedef struct {
236 rtx reg;
237 rtx saved_reg;
238 HOST_WIDE_INT cfa_offset;
239 } queued_reg_save;
242 static vec<queued_reg_save> queued_reg_saves;
244 /* True if any CFI directives were emitted at the current insn. */
245 static bool any_cfis_emitted;
247 /* Short-hand for commonly used register numbers. */
248 static unsigned dw_stack_pointer_regnum;
249 static unsigned dw_frame_pointer_regnum;
251 /* Hook used by __throw. */
254 expand_builtin_dwarf_sp_column (void)
256 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
257 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
260 /* MEM is a memory reference for the register size table, each element of
261 which has mode MODE. Initialize column C as a return address column. */
263 static void
264 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
266 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
267 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
268 emit_move_insn (adjust_address (mem, mode, offset),
269 gen_int_mode (size, mode));
272 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
273 init_one_dwarf_reg_size to communicate on what has been done by the
274 latter. */
276 typedef struct
278 /* Whether the dwarf return column was initialized. */
279 bool wrote_return_column;
281 /* For each hard register REGNO, whether init_one_dwarf_reg_size
282 was given REGNO to process already. */
283 bool processed_regno [FIRST_PSEUDO_REGISTER];
285 } init_one_dwarf_reg_state;
287 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
288 initialize the dwarf register size table entry corresponding to register
289 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
290 use for the size entry to initialize, and INIT_STATE is the communication
291 datastructure conveying what we're doing to our caller. */
293 static
294 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
295 rtx table, machine_mode slotmode,
296 init_one_dwarf_reg_state *init_state)
298 const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
299 const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
300 const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
302 const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
303 const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);
305 init_state->processed_regno[regno] = true;
307 if (rnum >= DWARF_FRAME_REGISTERS)
308 return;
310 if (dnum == DWARF_FRAME_RETURN_COLUMN)
312 if (regmode == VOIDmode)
313 return;
314 init_state->wrote_return_column = true;
317 if (slotoffset < 0)
318 return;
320 emit_move_insn (adjust_address (table, slotmode, slotoffset),
321 gen_int_mode (regsize, slotmode));
324 /* Generate code to initialize the dwarf register size table located
325 at the provided ADDRESS. */
327 void
328 expand_builtin_init_dwarf_reg_sizes (tree address)
330 unsigned int i;
331 machine_mode mode = TYPE_MODE (char_type_node);
332 rtx addr = expand_normal (address);
333 rtx mem = gen_rtx_MEM (BLKmode, addr);
335 init_one_dwarf_reg_state init_state;
337 memset ((char *)&init_state, 0, sizeof (init_state));
339 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
341 machine_mode save_mode;
342 rtx span;
344 /* No point in processing a register multiple times. This could happen
345 with register spans, e.g. when a reg is first processed as a piece of
346 a span, then as a register on its own later on. */
348 if (init_state.processed_regno[i])
349 continue;
351 save_mode = targetm.dwarf_frame_reg_mode (i);
352 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
354 if (!span)
355 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
356 else
358 for (int si = 0; si < XVECLEN (span, 0); si++)
360 rtx reg = XVECEXP (span, 0, si);
362 init_one_dwarf_reg_size
363 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
368 if (!init_state.wrote_return_column)
369 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
371 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
372 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
373 #endif
375 targetm.init_dwarf_reg_sizes_extra (address);
379 static dw_trace_info *
380 get_trace_info (rtx_insn *insn)
382 dw_trace_info dummy;
383 dummy.head = insn;
384 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
387 static bool
388 save_point_p (rtx_insn *insn)
390 /* Labels, except those that are really jump tables. */
391 if (LABEL_P (insn))
392 return inside_basic_block_p (insn);
394 /* We split traces at the prologue/epilogue notes because those
395 are points at which the unwind info is usually stable. This
396 makes it easier to find spots with identical unwind info so
397 that we can use remember/restore_state opcodes. */
398 if (NOTE_P (insn))
399 switch (NOTE_KIND (insn))
401 case NOTE_INSN_PROLOGUE_END:
402 case NOTE_INSN_EPILOGUE_BEG:
403 return true;
406 return false;
409 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
411 static inline HOST_WIDE_INT
412 div_data_align (HOST_WIDE_INT off)
414 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
415 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
416 return r;
419 /* Return true if we need a signed version of a given opcode
420 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
422 static inline bool
423 need_data_align_sf_opcode (HOST_WIDE_INT off)
425 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
428 /* Return a pointer to a newly allocated Call Frame Instruction. */
430 static inline dw_cfi_ref
431 new_cfi (void)
433 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
435 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
436 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
438 return cfi;
441 /* Return a newly allocated CFI row, with no defined data. */
443 static dw_cfi_row *
444 new_cfi_row (void)
446 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
448 row->cfa.reg = INVALID_REGNUM;
450 return row;
453 /* Return a copy of an existing CFI row. */
455 static dw_cfi_row *
456 copy_cfi_row (dw_cfi_row *src)
458 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
460 *dst = *src;
461 dst->reg_save = vec_safe_copy (src->reg_save);
463 return dst;
466 /* Generate a new label for the CFI info to refer to. */
468 static char *
469 dwarf2out_cfi_label (void)
471 int num = dwarf2out_cfi_label_num++;
472 char label[20];
474 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
476 return xstrdup (label);
479 /* Add CFI either to the current insn stream or to a vector, or both. */
481 static void
482 add_cfi (dw_cfi_ref cfi)
484 any_cfis_emitted = true;
486 if (add_cfi_insn != NULL)
488 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
489 NOTE_CFI (add_cfi_insn) = cfi;
492 if (add_cfi_vec != NULL)
493 vec_safe_push (*add_cfi_vec, cfi);
496 static void
497 add_cfi_args_size (HOST_WIDE_INT size)
499 dw_cfi_ref cfi = new_cfi ();
501 /* While we can occasionally have args_size < 0 internally, this state
502 should not persist at a point we actually need an opcode. */
503 gcc_assert (size >= 0);
505 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
506 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
508 add_cfi (cfi);
511 static void
512 add_cfi_restore (unsigned reg)
514 dw_cfi_ref cfi = new_cfi ();
516 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
517 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
519 add_cfi (cfi);
522 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
523 that the register column is no longer saved. */
525 static void
526 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
528 if (vec_safe_length (row->reg_save) <= column)
529 vec_safe_grow_cleared (row->reg_save, column + 1);
530 (*row->reg_save)[column] = cfi;
533 /* This function fills in aa dw_cfa_location structure from a dwarf location
534 descriptor sequence. */
536 static void
537 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
539 struct dw_loc_descr_node *ptr;
540 cfa->offset = 0;
541 cfa->base_offset = 0;
542 cfa->indirect = 0;
543 cfa->reg = -1;
545 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
547 enum dwarf_location_atom op = ptr->dw_loc_opc;
549 switch (op)
551 case DW_OP_reg0:
552 case DW_OP_reg1:
553 case DW_OP_reg2:
554 case DW_OP_reg3:
555 case DW_OP_reg4:
556 case DW_OP_reg5:
557 case DW_OP_reg6:
558 case DW_OP_reg7:
559 case DW_OP_reg8:
560 case DW_OP_reg9:
561 case DW_OP_reg10:
562 case DW_OP_reg11:
563 case DW_OP_reg12:
564 case DW_OP_reg13:
565 case DW_OP_reg14:
566 case DW_OP_reg15:
567 case DW_OP_reg16:
568 case DW_OP_reg17:
569 case DW_OP_reg18:
570 case DW_OP_reg19:
571 case DW_OP_reg20:
572 case DW_OP_reg21:
573 case DW_OP_reg22:
574 case DW_OP_reg23:
575 case DW_OP_reg24:
576 case DW_OP_reg25:
577 case DW_OP_reg26:
578 case DW_OP_reg27:
579 case DW_OP_reg28:
580 case DW_OP_reg29:
581 case DW_OP_reg30:
582 case DW_OP_reg31:
583 cfa->reg = op - DW_OP_reg0;
584 break;
585 case DW_OP_regx:
586 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
587 break;
588 case DW_OP_breg0:
589 case DW_OP_breg1:
590 case DW_OP_breg2:
591 case DW_OP_breg3:
592 case DW_OP_breg4:
593 case DW_OP_breg5:
594 case DW_OP_breg6:
595 case DW_OP_breg7:
596 case DW_OP_breg8:
597 case DW_OP_breg9:
598 case DW_OP_breg10:
599 case DW_OP_breg11:
600 case DW_OP_breg12:
601 case DW_OP_breg13:
602 case DW_OP_breg14:
603 case DW_OP_breg15:
604 case DW_OP_breg16:
605 case DW_OP_breg17:
606 case DW_OP_breg18:
607 case DW_OP_breg19:
608 case DW_OP_breg20:
609 case DW_OP_breg21:
610 case DW_OP_breg22:
611 case DW_OP_breg23:
612 case DW_OP_breg24:
613 case DW_OP_breg25:
614 case DW_OP_breg26:
615 case DW_OP_breg27:
616 case DW_OP_breg28:
617 case DW_OP_breg29:
618 case DW_OP_breg30:
619 case DW_OP_breg31:
620 cfa->reg = op - DW_OP_breg0;
621 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
622 break;
623 case DW_OP_bregx:
624 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
625 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
626 break;
627 case DW_OP_deref:
628 cfa->indirect = 1;
629 break;
630 case DW_OP_plus_uconst:
631 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
632 break;
633 default:
634 gcc_unreachable ();
639 /* Find the previous value for the CFA, iteratively. CFI is the opcode
640 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
641 one level of remember/restore state processing. */
643 void
644 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
646 switch (cfi->dw_cfi_opc)
648 case DW_CFA_def_cfa_offset:
649 case DW_CFA_def_cfa_offset_sf:
650 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
651 break;
652 case DW_CFA_def_cfa_register:
653 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
654 break;
655 case DW_CFA_def_cfa:
656 case DW_CFA_def_cfa_sf:
657 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
658 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
659 break;
660 case DW_CFA_def_cfa_expression:
661 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
662 break;
664 case DW_CFA_remember_state:
665 gcc_assert (!remember->in_use);
666 *remember = *loc;
667 remember->in_use = 1;
668 break;
669 case DW_CFA_restore_state:
670 gcc_assert (remember->in_use);
671 *loc = *remember;
672 remember->in_use = 0;
673 break;
675 default:
676 break;
680 /* Determine if two dw_cfa_location structures define the same data. */
682 bool
683 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
685 return (loc1->reg == loc2->reg
686 && loc1->offset == loc2->offset
687 && loc1->indirect == loc2->indirect
688 && (loc1->indirect == 0
689 || loc1->base_offset == loc2->base_offset));
692 /* Determine if two CFI operands are identical. */
694 static bool
695 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
697 switch (t)
699 case dw_cfi_oprnd_unused:
700 return true;
701 case dw_cfi_oprnd_reg_num:
702 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
703 case dw_cfi_oprnd_offset:
704 return a->dw_cfi_offset == b->dw_cfi_offset;
705 case dw_cfi_oprnd_addr:
706 return (a->dw_cfi_addr == b->dw_cfi_addr
707 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
708 case dw_cfi_oprnd_loc:
709 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
711 gcc_unreachable ();
714 /* Determine if two CFI entries are identical. */
716 static bool
717 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
719 enum dwarf_call_frame_info opc;
721 /* Make things easier for our callers, including missing operands. */
722 if (a == b)
723 return true;
724 if (a == NULL || b == NULL)
725 return false;
727 /* Obviously, the opcodes must match. */
728 opc = a->dw_cfi_opc;
729 if (opc != b->dw_cfi_opc)
730 return false;
732 /* Compare the two operands, re-using the type of the operands as
733 already exposed elsewhere. */
734 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
735 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
736 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
737 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
740 /* Determine if two CFI_ROW structures are identical. */
742 static bool
743 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
745 size_t i, n_a, n_b, n_max;
747 if (a->cfa_cfi)
749 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
750 return false;
752 else if (!cfa_equal_p (&a->cfa, &b->cfa))
753 return false;
755 n_a = vec_safe_length (a->reg_save);
756 n_b = vec_safe_length (b->reg_save);
757 n_max = MAX (n_a, n_b);
759 for (i = 0; i < n_max; ++i)
761 dw_cfi_ref r_a = NULL, r_b = NULL;
763 if (i < n_a)
764 r_a = (*a->reg_save)[i];
765 if (i < n_b)
766 r_b = (*b->reg_save)[i];
768 if (!cfi_equal_p (r_a, r_b))
769 return false;
772 return true;
775 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
776 what opcode to emit. Returns the CFI opcode to effect the change, or
777 NULL if NEW_CFA == OLD_CFA. */
779 static dw_cfi_ref
780 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
782 dw_cfi_ref cfi;
784 /* If nothing changed, no need to issue any call frame instructions. */
785 if (cfa_equal_p (old_cfa, new_cfa))
786 return NULL;
788 cfi = new_cfi ();
790 if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
792 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
793 the CFA register did not change but the offset did. The data
794 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
795 in the assembler via the .cfi_def_cfa_offset directive. */
796 if (new_cfa->offset < 0)
797 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
798 else
799 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
800 cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
802 else if (new_cfa->offset == old_cfa->offset
803 && old_cfa->reg != INVALID_REGNUM
804 && !new_cfa->indirect
805 && !old_cfa->indirect)
807 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
808 indicating the CFA register has changed to <register> but the
809 offset has not changed. */
810 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
811 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
813 else if (new_cfa->indirect == 0)
815 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
816 indicating the CFA register has changed to <register> with
817 the specified offset. The data factoring for DW_CFA_def_cfa_sf
818 happens in output_cfi, or in the assembler via the .cfi_def_cfa
819 directive. */
820 if (new_cfa->offset < 0)
821 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
822 else
823 cfi->dw_cfi_opc = DW_CFA_def_cfa;
824 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
825 cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
827 else
829 /* Construct a DW_CFA_def_cfa_expression instruction to
830 calculate the CFA using a full location expression since no
831 register-offset pair is available. */
832 struct dw_loc_descr_node *loc_list;
834 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
835 loc_list = build_cfa_loc (new_cfa, 0);
836 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
839 return cfi;
842 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
844 static void
845 def_cfa_1 (dw_cfa_location *new_cfa)
847 dw_cfi_ref cfi;
849 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
850 cur_trace->cfa_store.offset = new_cfa->offset;
852 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
853 if (cfi)
855 cur_row->cfa = *new_cfa;
856 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
857 ? cfi : NULL);
859 add_cfi (cfi);
863 /* Add the CFI for saving a register. REG is the CFA column number.
864 If SREG is -1, the register is saved at OFFSET from the CFA;
865 otherwise it is saved in SREG. */
867 static void
868 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
870 dw_fde_ref fde = cfun ? cfun->fde : NULL;
871 dw_cfi_ref cfi = new_cfi ();
873 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
875 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
876 if (fde
877 && fde->stack_realign
878 && sreg == INVALID_REGNUM)
880 cfi->dw_cfi_opc = DW_CFA_expression;
881 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
882 cfi->dw_cfi_oprnd2.dw_cfi_loc
883 = build_cfa_aligned_loc (&cur_row->cfa, offset,
884 fde->stack_realignment);
886 else if (sreg == INVALID_REGNUM)
888 if (need_data_align_sf_opcode (offset))
889 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
890 else if (reg & ~0x3f)
891 cfi->dw_cfi_opc = DW_CFA_offset_extended;
892 else
893 cfi->dw_cfi_opc = DW_CFA_offset;
894 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
896 else if (sreg == reg)
898 /* While we could emit something like DW_CFA_same_value or
899 DW_CFA_restore, we never expect to see something like that
900 in a prologue. This is more likely to be a bug. A backend
901 can always bypass this by using REG_CFA_RESTORE directly. */
902 gcc_unreachable ();
904 else
906 cfi->dw_cfi_opc = DW_CFA_register;
907 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
910 add_cfi (cfi);
911 update_row_reg_save (cur_row, reg, cfi);
914 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
915 and adjust data structures to match. */
917 static void
918 notice_args_size (rtx_insn *insn)
920 HOST_WIDE_INT args_size, delta;
921 rtx note;
923 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
924 if (note == NULL)
925 return;
927 args_size = INTVAL (XEXP (note, 0));
928 delta = args_size - cur_trace->end_true_args_size;
929 if (delta == 0)
930 return;
932 cur_trace->end_true_args_size = args_size;
934 /* If the CFA is computed off the stack pointer, then we must adjust
935 the computation of the CFA as well. */
936 if (cur_cfa->reg == dw_stack_pointer_regnum)
938 gcc_assert (!cur_cfa->indirect);
940 /* Convert a change in args_size (always a positive in the
941 direction of stack growth) to a change in stack pointer. */
942 if (!STACK_GROWS_DOWNWARD)
943 delta = -delta;
945 cur_cfa->offset += delta;
949 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
950 data within the trace related to EH insns and args_size. */
952 static void
953 notice_eh_throw (rtx_insn *insn)
955 HOST_WIDE_INT args_size;
957 args_size = cur_trace->end_true_args_size;
958 if (cur_trace->eh_head == NULL)
960 cur_trace->eh_head = insn;
961 cur_trace->beg_delay_args_size = args_size;
962 cur_trace->end_delay_args_size = args_size;
964 else if (cur_trace->end_delay_args_size != args_size)
966 cur_trace->end_delay_args_size = args_size;
968 /* ??? If the CFA is the stack pointer, search backward for the last
969 CFI note and insert there. Given that the stack changed for the
970 args_size change, there *must* be such a note in between here and
971 the last eh insn. */
972 add_cfi_args_size (args_size);
976 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
977 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
978 used in places where rtl is prohibited. */
980 static inline unsigned
981 dwf_regno (const_rtx reg)
983 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
984 return DWARF_FRAME_REGNUM (REGNO (reg));
987 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
989 static bool
990 compare_reg_or_pc (rtx x, rtx y)
992 if (REG_P (x) && REG_P (y))
993 return REGNO (x) == REGNO (y);
994 return x == y;
997 /* Record SRC as being saved in DEST. DEST may be null to delete an
998 existing entry. SRC may be a register or PC_RTX. */
1000 static void
1001 record_reg_saved_in_reg (rtx dest, rtx src)
1003 reg_saved_in_data *elt;
1004 size_t i;
1006 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1007 if (compare_reg_or_pc (elt->orig_reg, src))
1009 if (dest == NULL)
1010 cur_trace->regs_saved_in_regs.unordered_remove (i);
1011 else
1012 elt->saved_in_reg = dest;
1013 return;
1016 if (dest == NULL)
1017 return;
1019 reg_saved_in_data e = {src, dest};
1020 cur_trace->regs_saved_in_regs.safe_push (e);
1023 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1024 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1026 static void
1027 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1029 queued_reg_save *q;
1030 queued_reg_save e = {reg, sreg, offset};
1031 size_t i;
1033 /* Duplicates waste space, but it's also necessary to remove them
1034 for correctness, since the queue gets output in reverse order. */
1035 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1036 if (compare_reg_or_pc (q->reg, reg))
1038 *q = e;
1039 return;
1042 queued_reg_saves.safe_push (e);
1045 /* Output all the entries in QUEUED_REG_SAVES. */
1047 static void
1048 dwarf2out_flush_queued_reg_saves (void)
1050 queued_reg_save *q;
1051 size_t i;
1053 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1055 unsigned int reg, sreg;
1057 record_reg_saved_in_reg (q->saved_reg, q->reg);
1059 if (q->reg == pc_rtx)
1060 reg = DWARF_FRAME_RETURN_COLUMN;
1061 else
1062 reg = dwf_regno (q->reg);
1063 if (q->saved_reg)
1064 sreg = dwf_regno (q->saved_reg);
1065 else
1066 sreg = INVALID_REGNUM;
1067 reg_save (reg, sreg, q->cfa_offset);
1070 queued_reg_saves.truncate (0);
1073 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1074 location for? Or, does it clobber a register which we've previously
1075 said that some other register is saved in, and for which we now
1076 have a new location for? */
1078 static bool
1079 clobbers_queued_reg_save (const_rtx insn)
1081 queued_reg_save *q;
1082 size_t iq;
1084 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1086 size_t ir;
1087 reg_saved_in_data *rir;
1089 if (modified_in_p (q->reg, insn))
1090 return true;
1092 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1093 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1094 && modified_in_p (rir->saved_in_reg, insn))
1095 return true;
1098 return false;
1101 /* What register, if any, is currently saved in REG? */
1103 static rtx
1104 reg_saved_in (rtx reg)
1106 unsigned int regn = REGNO (reg);
1107 queued_reg_save *q;
1108 reg_saved_in_data *rir;
1109 size_t i;
1111 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1112 if (q->saved_reg && regn == REGNO (q->saved_reg))
1113 return q->reg;
1115 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1116 if (regn == REGNO (rir->saved_in_reg))
1117 return rir->orig_reg;
1119 return NULL_RTX;
1122 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1124 static void
1125 dwarf2out_frame_debug_def_cfa (rtx pat)
1127 memset (cur_cfa, 0, sizeof (*cur_cfa));
1129 if (GET_CODE (pat) == PLUS)
1131 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1132 pat = XEXP (pat, 0);
1134 if (MEM_P (pat))
1136 cur_cfa->indirect = 1;
1137 pat = XEXP (pat, 0);
1138 if (GET_CODE (pat) == PLUS)
1140 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1141 pat = XEXP (pat, 0);
1144 /* ??? If this fails, we could be calling into the _loc functions to
1145 define a full expression. So far no port does that. */
1146 gcc_assert (REG_P (pat));
1147 cur_cfa->reg = dwf_regno (pat);
1150 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1152 static void
1153 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1155 rtx src, dest;
1157 gcc_assert (GET_CODE (pat) == SET);
1158 dest = XEXP (pat, 0);
1159 src = XEXP (pat, 1);
1161 switch (GET_CODE (src))
1163 case PLUS:
1164 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1165 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1166 break;
1168 case REG:
1169 break;
1171 default:
1172 gcc_unreachable ();
1175 cur_cfa->reg = dwf_regno (dest);
1176 gcc_assert (cur_cfa->indirect == 0);
1179 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1181 static void
1182 dwarf2out_frame_debug_cfa_offset (rtx set)
1184 HOST_WIDE_INT offset;
1185 rtx src, addr, span;
1186 unsigned int sregno;
1188 src = XEXP (set, 1);
1189 addr = XEXP (set, 0);
1190 gcc_assert (MEM_P (addr));
1191 addr = XEXP (addr, 0);
1193 /* As documented, only consider extremely simple addresses. */
1194 switch (GET_CODE (addr))
1196 case REG:
1197 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1198 offset = -cur_cfa->offset;
1199 break;
1200 case PLUS:
1201 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1202 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1203 break;
1204 default:
1205 gcc_unreachable ();
1208 if (src == pc_rtx)
1210 span = NULL;
1211 sregno = DWARF_FRAME_RETURN_COLUMN;
1213 else
1215 span = targetm.dwarf_register_span (src);
1216 sregno = dwf_regno (src);
1219 /* ??? We'd like to use queue_reg_save, but we need to come up with
1220 a different flushing heuristic for epilogues. */
1221 if (!span)
1222 reg_save (sregno, INVALID_REGNUM, offset);
1223 else
1225 /* We have a PARALLEL describing where the contents of SRC live.
1226 Adjust the offset for each piece of the PARALLEL. */
1227 HOST_WIDE_INT span_offset = offset;
1229 gcc_assert (GET_CODE (span) == PARALLEL);
1231 const int par_len = XVECLEN (span, 0);
1232 for (int par_index = 0; par_index < par_len; par_index++)
1234 rtx elem = XVECEXP (span, 0, par_index);
1235 sregno = dwf_regno (src);
1236 reg_save (sregno, INVALID_REGNUM, span_offset);
1237 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1242 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1244 static void
1245 dwarf2out_frame_debug_cfa_register (rtx set)
1247 rtx src, dest;
1248 unsigned sregno, dregno;
1250 src = XEXP (set, 1);
1251 dest = XEXP (set, 0);
1253 record_reg_saved_in_reg (dest, src);
1254 if (src == pc_rtx)
1255 sregno = DWARF_FRAME_RETURN_COLUMN;
1256 else
1257 sregno = dwf_regno (src);
1259 dregno = dwf_regno (dest);
1261 /* ??? We'd like to use queue_reg_save, but we need to come up with
1262 a different flushing heuristic for epilogues. */
1263 reg_save (sregno, dregno, 0);
1266 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1268 static void
1269 dwarf2out_frame_debug_cfa_expression (rtx set)
1271 rtx src, dest, span;
1272 dw_cfi_ref cfi = new_cfi ();
1273 unsigned regno;
1275 dest = SET_DEST (set);
1276 src = SET_SRC (set);
1278 gcc_assert (REG_P (src));
1279 gcc_assert (MEM_P (dest));
1281 span = targetm.dwarf_register_span (src);
1282 gcc_assert (!span);
1284 regno = dwf_regno (src);
1286 cfi->dw_cfi_opc = DW_CFA_expression;
1287 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1288 cfi->dw_cfi_oprnd2.dw_cfi_loc
1289 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1290 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1292 /* ??? We'd like to use queue_reg_save, were the interface different,
1293 and, as above, we could manage flushing for epilogues. */
1294 add_cfi (cfi);
1295 update_row_reg_save (cur_row, regno, cfi);
1298 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1300 static void
1301 dwarf2out_frame_debug_cfa_restore (rtx reg)
1303 gcc_assert (REG_P (reg));
1305 rtx span = targetm.dwarf_register_span (reg);
1306 if (!span)
1308 unsigned int regno = dwf_regno (reg);
1309 add_cfi_restore (regno);
1310 update_row_reg_save (cur_row, regno, NULL);
1312 else
1314 /* We have a PARALLEL describing where the contents of REG live.
1315 Restore the register for each piece of the PARALLEL. */
1316 gcc_assert (GET_CODE (span) == PARALLEL);
1318 const int par_len = XVECLEN (span, 0);
1319 for (int par_index = 0; par_index < par_len; par_index++)
1321 reg = XVECEXP (span, 0, par_index);
1322 gcc_assert (REG_P (reg));
1323 unsigned int regno = dwf_regno (reg);
1324 add_cfi_restore (regno);
1325 update_row_reg_save (cur_row, regno, NULL);
1330 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1331 ??? Perhaps we should note in the CIE where windows are saved (instead of
1332 assuming 0(cfa)) and what registers are in the window. */
1334 static void
1335 dwarf2out_frame_debug_cfa_window_save (void)
1337 dw_cfi_ref cfi = new_cfi ();
1339 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1340 add_cfi (cfi);
1343 /* Record call frame debugging information for an expression EXPR,
1344 which either sets SP or FP (adjusting how we calculate the frame
1345 address) or saves a register to the stack or another register.
1346 LABEL indicates the address of EXPR.
1348 This function encodes a state machine mapping rtxes to actions on
1349 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1350 users need not read the source code.
1352 The High-Level Picture
1354 Changes in the register we use to calculate the CFA: Currently we
1355 assume that if you copy the CFA register into another register, we
1356 should take the other one as the new CFA register; this seems to
1357 work pretty well. If it's wrong for some target, it's simple
1358 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1360 Changes in the register we use for saving registers to the stack:
1361 This is usually SP, but not always. Again, we deduce that if you
1362 copy SP into another register (and SP is not the CFA register),
1363 then the new register is the one we will be using for register
1364 saves. This also seems to work.
1366 Register saves: There's not much guesswork about this one; if
1367 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1368 register save, and the register used to calculate the destination
1369 had better be the one we think we're using for this purpose.
1370 It's also assumed that a copy from a call-saved register to another
1371 register is saving that register if RTX_FRAME_RELATED_P is set on
1372 that instruction. If the copy is from a call-saved register to
1373 the *same* register, that means that the register is now the same
1374 value as in the caller.
1376 Except: If the register being saved is the CFA register, and the
1377 offset is nonzero, we are saving the CFA, so we assume we have to
1378 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1379 the intent is to save the value of SP from the previous frame.
1381 In addition, if a register has previously been saved to a different
1382 register,
1384 Invariants / Summaries of Rules
1386 cfa current rule for calculating the CFA. It usually
1387 consists of a register and an offset. This is
1388 actually stored in *cur_cfa, but abbreviated
1389 for the purposes of this documentation.
1390 cfa_store register used by prologue code to save things to the stack
1391 cfa_store.offset is the offset from the value of
1392 cfa_store.reg to the actual CFA
1393 cfa_temp register holding an integral value. cfa_temp.offset
1394 stores the value, which will be used to adjust the
1395 stack pointer. cfa_temp is also used like cfa_store,
1396 to track stores to the stack via fp or a temp reg.
1398 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1399 with cfa.reg as the first operand changes the cfa.reg and its
1400 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1401 cfa_temp.offset.
1403 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1404 expression yielding a constant. This sets cfa_temp.reg
1405 and cfa_temp.offset.
1407 Rule 5: Create a new register cfa_store used to save items to the
1408 stack.
1410 Rules 10-14: Save a register to the stack. Define offset as the
1411 difference of the original location and cfa_store's
1412 location (or cfa_temp's location if cfa_temp is used).
1414 Rules 16-20: If AND operation happens on sp in prologue, we assume
1415 stack is realigned. We will use a group of DW_OP_XXX
1416 expressions to represent the location of the stored
1417 register instead of CFA+offset.
1419 The Rules
1421 "{a,b}" indicates a choice of a xor b.
1422 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1424 Rule 1:
1425 (set <reg1> <reg2>:cfa.reg)
1426 effects: cfa.reg = <reg1>
1427 cfa.offset unchanged
1428 cfa_temp.reg = <reg1>
1429 cfa_temp.offset = cfa.offset
1431 Rule 2:
1432 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1433 {<const_int>,<reg>:cfa_temp.reg}))
1434 effects: cfa.reg = sp if fp used
1435 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1436 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1437 if cfa_store.reg==sp
1439 Rule 3:
1440 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1441 effects: cfa.reg = fp
1442 cfa_offset += +/- <const_int>
1444 Rule 4:
1445 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1446 constraints: <reg1> != fp
1447 <reg1> != sp
1448 effects: cfa.reg = <reg1>
1449 cfa_temp.reg = <reg1>
1450 cfa_temp.offset = cfa.offset
1452 Rule 5:
1453 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1454 constraints: <reg1> != fp
1455 <reg1> != sp
1456 effects: cfa_store.reg = <reg1>
1457 cfa_store.offset = cfa.offset - cfa_temp.offset
1459 Rule 6:
1460 (set <reg> <const_int>)
1461 effects: cfa_temp.reg = <reg>
1462 cfa_temp.offset = <const_int>
1464 Rule 7:
1465 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1466 effects: cfa_temp.reg = <reg1>
1467 cfa_temp.offset |= <const_int>
1469 Rule 8:
1470 (set <reg> (high <exp>))
1471 effects: none
1473 Rule 9:
1474 (set <reg> (lo_sum <exp> <const_int>))
1475 effects: cfa_temp.reg = <reg>
1476 cfa_temp.offset = <const_int>
1478 Rule 10:
1479 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1480 effects: cfa_store.offset -= <const_int>
1481 cfa.offset = cfa_store.offset if cfa.reg == sp
1482 cfa.reg = sp
1483 cfa.base_offset = -cfa_store.offset
1485 Rule 11:
1486 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1487 effects: cfa_store.offset += -/+ mode_size(mem)
1488 cfa.offset = cfa_store.offset if cfa.reg == sp
1489 cfa.reg = sp
1490 cfa.base_offset = -cfa_store.offset
1492 Rule 12:
1493 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1495 <reg2>)
1496 effects: cfa.reg = <reg1>
1497 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1499 Rule 13:
1500 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1501 effects: cfa.reg = <reg1>
1502 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1504 Rule 14:
1505 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1506 effects: cfa.reg = <reg1>
1507 cfa.base_offset = -cfa_temp.offset
1508 cfa_temp.offset -= mode_size(mem)
1510 Rule 15:
1511 (set <reg> {unspec, unspec_volatile})
1512 effects: target-dependent
1514 Rule 16:
1515 (set sp (and: sp <const_int>))
1516 constraints: cfa_store.reg == sp
1517 effects: cfun->fde.stack_realign = 1
1518 cfa_store.offset = 0
1519 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1521 Rule 17:
1522 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1523 effects: cfa_store.offset += -/+ mode_size(mem)
1525 Rule 18:
1526 (set (mem ({pre_inc, pre_dec} sp)) fp)
1527 constraints: fde->stack_realign == 1
1528 effects: cfa_store.offset = 0
1529 cfa.reg != HARD_FRAME_POINTER_REGNUM
1531 Rule 19:
1532 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1533 constraints: fde->stack_realign == 1
1534 && cfa.offset == 0
1535 && cfa.indirect == 0
1536 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1537 effects: Use DW_CFA_def_cfa_expression to define cfa
1538 cfa.reg == fde->drap_reg */
1540 static void
1541 dwarf2out_frame_debug_expr (rtx expr)
/* NOTE(review): implements the rule table documented above; updates
   cur_cfa, cur_trace->cfa_store / cfa_temp, and queues register saves.  */
1543 rtx src, dest, span;
1544 HOST_WIDE_INT offset;
1545 dw_fde_ref fde;
1547 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1548 the PARALLEL independently. The first element is always processed if
1549 it is a SET. This is for backward compatibility. Other elements
1550 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1551 flag is set in them. */
1552 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1554 int par_index;
1555 int limit = XVECLEN (expr, 0);
1556 rtx elem;
1558 /* PARALLELs have strict read-modify-write semantics, so we
1559 ought to evaluate every rvalue before changing any lvalue.
1560 It's cumbersome to do that in general, but there's an
1561 easy approximation that is enough for all current users:
1562 handle register saves before register assignments. */
1563 if (GET_CODE (expr) == PARALLEL)
1564 for (par_index = 0; par_index < limit; par_index++)
1566 elem = XVECEXP (expr, 0, par_index);
1567 if (GET_CODE (elem) == SET
1568 && MEM_P (SET_DEST (elem))
1569 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1570 dwarf2out_frame_debug_expr (elem);
1573 for (par_index = 0; par_index < limit; par_index++)
1575 elem = XVECEXP (expr, 0, par_index);
1576 if (GET_CODE (elem) == SET
1577 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1578 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1579 dwarf2out_frame_debug_expr (elem);
1581 return;
1584 gcc_assert (GET_CODE (expr) == SET);
1586 src = SET_SRC (expr);
1587 dest = SET_DEST (expr);
/* If SRC was previously stashed in another register, reason about the
   original register instead.  */
1589 if (REG_P (src))
1591 rtx rsi = reg_saved_in (src);
1592 if (rsi)
1593 src = rsi;
1596 fde = cfun->fde;
/* REG dest: CFA bookkeeping or a reg-to-reg save.
   MEM dest: a register save to the stack.  */
1598 switch (GET_CODE (dest))
1600 case REG:
1601 switch (GET_CODE (src))
1603 /* Setting FP from SP. */
1604 case REG:
1605 if (cur_cfa->reg == dwf_regno (src))
1607 /* Rule 1 */
1608 /* Update the CFA rule wrt SP or FP. Make sure src is
1609 relative to the current CFA register.
1611 We used to require that dest be either SP or FP, but the
1612 ARM copies SP to a temporary register, and from there to
1613 FP. So we just rely on the backends to only set
1614 RTX_FRAME_RELATED_P on appropriate insns. */
1615 cur_cfa->reg = dwf_regno (dest);
1616 cur_trace->cfa_temp.reg = cur_cfa->reg;
1617 cur_trace->cfa_temp.offset = cur_cfa->offset;
1619 else
1621 /* Saving a register in a register. */
1622 gcc_assert (!fixed_regs [REGNO (dest)]
1623 /* For the SPARC and its register window. */
1624 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1626 /* After stack is aligned, we can only save SP in FP
1627 if drap register is used. In this case, we have
1628 to restore stack pointer with the CFA value and we
1629 don't generate this DWARF information. */
1630 if (fde
1631 && fde->stack_realign
1632 && REGNO (src) == STACK_POINTER_REGNUM)
1633 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1634 && fde->drap_reg != INVALID_REGNUM
1635 && cur_cfa->reg != dwf_regno (src));
1636 else
1637 queue_reg_save (src, dest, 0);
1639 break;
1641 case PLUS:
1642 case MINUS:
1643 case LO_SUM:
1644 if (dest == stack_pointer_rtx)
1646 /* Rule 2 */
1647 /* Adjusting SP. */
1648 switch (GET_CODE (XEXP (src, 1)))
1650 case CONST_INT:
1651 offset = INTVAL (XEXP (src, 1));
1652 break;
1653 case REG:
1654 gcc_assert (dwf_regno (XEXP (src, 1))
1655 == cur_trace->cfa_temp.reg);
1656 offset = cur_trace->cfa_temp.offset;
1657 break;
1658 default:
1659 gcc_unreachable ();
1662 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1664 /* Restoring SP from FP in the epilogue. */
1665 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1666 cur_cfa->reg = dw_stack_pointer_regnum;
1668 else if (GET_CODE (src) == LO_SUM)
1669 /* Assume we've set the source reg of the LO_SUM from sp. */
1671 else
1672 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1674 if (GET_CODE (src) != MINUS)
1675 offset = -offset;
1676 if (cur_cfa->reg == dw_stack_pointer_regnum)
1677 cur_cfa->offset += offset;
1678 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1679 cur_trace->cfa_store.offset += offset;
1681 else if (dest == hard_frame_pointer_rtx)
1683 /* Rule 3 */
1684 /* Either setting the FP from an offset of the SP,
1685 or adjusting the FP */
1686 gcc_assert (frame_pointer_needed);
1688 gcc_assert (REG_P (XEXP (src, 0))
1689 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1690 && CONST_INT_P (XEXP (src, 1)));
1691 offset = INTVAL (XEXP (src, 1));
1692 if (GET_CODE (src) != MINUS)
1693 offset = -offset;
1694 cur_cfa->offset += offset;
1695 cur_cfa->reg = dw_frame_pointer_regnum;
1697 else
1699 gcc_assert (GET_CODE (src) != MINUS);
1701 /* Rule 4 */
1702 if (REG_P (XEXP (src, 0))
1703 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1704 && CONST_INT_P (XEXP (src, 1)))
1706 /* Setting a temporary CFA register that will be copied
1707 into the FP later on. */
1708 offset = - INTVAL (XEXP (src, 1));
1709 cur_cfa->offset += offset;
1710 cur_cfa->reg = dwf_regno (dest);
1711 /* Or used to save regs to the stack. */
1712 cur_trace->cfa_temp.reg = cur_cfa->reg;
1713 cur_trace->cfa_temp.offset = cur_cfa->offset;
1716 /* Rule 5 */
1717 else if (REG_P (XEXP (src, 0))
1718 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1719 && XEXP (src, 1) == stack_pointer_rtx)
1721 /* Setting a scratch register that we will use instead
1722 of SP for saving registers to the stack. */
1723 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1724 cur_trace->cfa_store.reg = dwf_regno (dest);
1725 cur_trace->cfa_store.offset
1726 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1729 /* Rule 9 */
1730 else if (GET_CODE (src) == LO_SUM
1731 && CONST_INT_P (XEXP (src, 1)))
1733 cur_trace->cfa_temp.reg = dwf_regno (dest);
1734 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1736 else
1737 gcc_unreachable ();
1739 break;
1741 /* Rule 6 */
1742 case CONST_INT:
1743 cur_trace->cfa_temp.reg = dwf_regno (dest);
1744 cur_trace->cfa_temp.offset = INTVAL (src);
1745 break;
1747 /* Rule 7 */
1748 case IOR:
1749 gcc_assert (REG_P (XEXP (src, 0))
1750 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1751 && CONST_INT_P (XEXP (src, 1)));
1753 cur_trace->cfa_temp.reg = dwf_regno (dest);
1754 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1755 break;
1757 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1758 which will fill in all of the bits. */
1759 /* Rule 8 */
1760 case HIGH:
1761 break;
1763 /* Rule 15 */
1764 case UNSPEC:
1765 case UNSPEC_VOLATILE:
1766 /* All unspecs should be represented by REG_CFA_* notes. */
1767 gcc_unreachable ();
1768 return;
1770 /* Rule 16 */
1771 case AND:
1772 /* If this AND operation happens on stack pointer in prologue,
1773 we assume the stack is realigned and we extract the
1774 alignment. */
1775 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1777 /* We interpret reg_save differently with stack_realign set.
1778 Thus we must flush whatever we have queued first. */
1779 dwarf2out_flush_queued_reg_saves ();
1781 gcc_assert (cur_trace->cfa_store.reg
1782 == dwf_regno (XEXP (src, 0)));
1783 fde->stack_realign = 1;
1784 fde->stack_realignment = INTVAL (XEXP (src, 1));
1785 cur_trace->cfa_store.offset = 0;
1787 if (cur_cfa->reg != dw_stack_pointer_regnum
1788 && cur_cfa->reg != dw_frame_pointer_regnum)
1789 fde->drap_reg = cur_cfa->reg;
1791 return;
1793 default:
1794 gcc_unreachable ();
1796 break;
1798 case MEM:
1800 /* Saving a register to the stack. Make sure dest is relative to the
1801 CFA register. */
1802 switch (GET_CODE (XEXP (dest, 0)))
1804 /* Rule 10 */
1805 /* With a push. */
1806 case PRE_MODIFY:
1807 case POST_MODIFY:
1808 /* We can't handle variable size modifications. */
1809 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1810 == CONST_INT);
1811 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1813 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1814 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1816 cur_trace->cfa_store.offset += offset;
1817 if (cur_cfa->reg == dw_stack_pointer_regnum)
1818 cur_cfa->offset = cur_trace->cfa_store.offset;
1820 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1821 offset -= cur_trace->cfa_store.offset;
1822 else
1823 offset = -cur_trace->cfa_store.offset;
1824 break;
1826 /* Rule 11 */
1827 case PRE_INC:
1828 case PRE_DEC:
1829 case POST_DEC:
1830 offset = GET_MODE_SIZE (GET_MODE (dest));
1831 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1832 offset = -offset;
1834 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1835 == STACK_POINTER_REGNUM)
1836 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1838 cur_trace->cfa_store.offset += offset;
1840 /* Rule 18: If stack is aligned, we will use FP as a
1841 reference to represent the address of the stored
1842 register. */
1843 if (fde
1844 && fde->stack_realign
1845 && REG_P (src)
1846 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1848 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1849 cur_trace->cfa_store.offset = 0;
1852 if (cur_cfa->reg == dw_stack_pointer_regnum)
1853 cur_cfa->offset = cur_trace->cfa_store.offset;
1855 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1856 offset += -cur_trace->cfa_store.offset;
1857 else
1858 offset = -cur_trace->cfa_store.offset;
1859 break;
1861 /* Rule 12 */
1862 /* With an offset. */
1863 case PLUS:
1864 case MINUS:
1865 case LO_SUM:
1867 unsigned int regno;
1869 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1870 && REG_P (XEXP (XEXP (dest, 0), 0)));
1871 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1872 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1873 offset = -offset;
1875 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1877 if (cur_cfa->reg == regno)
1878 offset -= cur_cfa->offset;
1879 else if (cur_trace->cfa_store.reg == regno)
1880 offset -= cur_trace->cfa_store.offset;
1881 else
1883 gcc_assert (cur_trace->cfa_temp.reg == regno);
1884 offset -= cur_trace->cfa_temp.offset;
1887 break;
1889 /* Rule 13 */
1890 /* Without an offset. */
1891 case REG:
1893 unsigned int regno = dwf_regno (XEXP (dest, 0));
1895 if (cur_cfa->reg == regno)
1896 offset = -cur_cfa->offset;
1897 else if (cur_trace->cfa_store.reg == regno)
1898 offset = -cur_trace->cfa_store.offset;
1899 else
1901 gcc_assert (cur_trace->cfa_temp.reg == regno);
1902 offset = -cur_trace->cfa_temp.offset;
1905 break;
1907 /* Rule 14 */
1908 case POST_INC:
1909 gcc_assert (cur_trace->cfa_temp.reg
1910 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1911 offset = -cur_trace->cfa_temp.offset;
1912 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1913 break;
1915 default:
1916 gcc_unreachable ();
1919 /* Rule 17 */
1920 /* If the source operand of this MEM operation is a memory,
1921 we only care how much stack grew. */
1922 if (MEM_P (src))
1923 break;
1925 if (REG_P (src)
1926 && REGNO (src) != STACK_POINTER_REGNUM
1927 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1928 && dwf_regno (src) == cur_cfa->reg)
1930 /* We're storing the current CFA reg into the stack. */
1932 if (cur_cfa->offset == 0)
1934 /* Rule 19 */
1935 /* If stack is aligned, putting CFA reg into stack means
1936 we can no longer use reg + offset to represent CFA.
1937 Here we use DW_CFA_def_cfa_expression instead. The
1938 result of this expression equals to the original CFA
1939 value. */
1940 if (fde
1941 && fde->stack_realign
1942 && cur_cfa->indirect == 0
1943 && cur_cfa->reg != dw_frame_pointer_regnum)
1945 gcc_assert (fde->drap_reg == cur_cfa->reg);
1947 cur_cfa->indirect = 1;
1948 cur_cfa->reg = dw_frame_pointer_regnum;
1949 cur_cfa->base_offset = offset;
1950 cur_cfa->offset = 0;
1952 fde->drap_reg_saved = 1;
1953 break;
1956 /* If the source register is exactly the CFA, assume
1957 we're saving SP like any other register; this happens
1958 on the ARM. */
1959 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1960 break;
1962 else
1964 /* Otherwise, we'll need to look in the stack to
1965 calculate the CFA. */
1966 rtx x = XEXP (dest, 0);
1968 if (!REG_P (x))
1969 x = XEXP (x, 0);
1970 gcc_assert (REG_P (x));
1972 cur_cfa->reg = dwf_regno (x);
1973 cur_cfa->base_offset = offset;
1974 cur_cfa->indirect = 1;
1975 break;
/* Finally queue the save itself; a target-provided span expands to one
   queued save per constituent piece.  */
1979 if (REG_P (src))
1980 span = targetm.dwarf_register_span (src);
1981 else
1982 span = NULL;
1984 if (!span)
1985 queue_reg_save (src, NULL_RTX, offset);
1986 else
1988 /* We have a PARALLEL describing where the contents of SRC live.
1989 Queue register saves for each piece of the PARALLEL. */
1990 HOST_WIDE_INT span_offset = offset;
1992 gcc_assert (GET_CODE (span) == PARALLEL);
1994 const int par_len = XVECLEN (span, 0);
1995 for (int par_index = 0; par_index < par_len; par_index++)
1997 rtx elem = XVECEXP (span, 0, par_index);
1998 queue_reg_save (elem, NULL_RTX, span_offset);
1999 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2002 break;
2004 default:
2005 gcc_unreachable ();
2009 /* Record call frame debugging information for INSN, which either sets
2010 SP or FP (adjusting how we calculate the frame address) or saves a
2011 register to the stack. */
2013 static void
2014 dwarf2out_frame_debug (rtx_insn *insn)
/* Process each REG_CFA_* note attached to INSN; if no note handled the
   insn, fall back to interpreting its pattern via
   dwarf2out_frame_debug_expr.  */
2016 rtx note, n, pat;
2017 bool handled_one = false;
2019 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2020 switch (REG_NOTE_KIND (note))
2022 case REG_FRAME_RELATED_EXPR:
/* Use the note's expression in place of INSN's own pattern.  */
2023 pat = XEXP (note, 0);
2024 goto do_frame_expr;
2026 case REG_CFA_DEF_CFA:
2027 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2028 handled_one = true;
2029 break;
2031 case REG_CFA_ADJUST_CFA:
/* A null note operand means "use the insn's (first) SET".  The same
   convention is used by the other REG_CFA_* cases below.  */
2032 n = XEXP (note, 0);
2033 if (n == NULL)
2035 n = PATTERN (insn);
2036 if (GET_CODE (n) == PARALLEL)
2037 n = XVECEXP (n, 0, 0);
2039 dwarf2out_frame_debug_adjust_cfa (n);
2040 handled_one = true;
2041 break;
2043 case REG_CFA_OFFSET:
2044 n = XEXP (note, 0);
2045 if (n == NULL)
2046 n = single_set (insn);
2047 dwarf2out_frame_debug_cfa_offset (n);
2048 handled_one = true;
2049 break;
2051 case REG_CFA_REGISTER:
2052 n = XEXP (note, 0);
2053 if (n == NULL)
2055 n = PATTERN (insn);
2056 if (GET_CODE (n) == PARALLEL)
2057 n = XVECEXP (n, 0, 0);
2059 dwarf2out_frame_debug_cfa_register (n);
2060 handled_one = true;
2061 break;
2063 case REG_CFA_EXPRESSION:
2064 n = XEXP (note, 0);
2065 if (n == NULL)
2066 n = single_set (insn);
2067 dwarf2out_frame_debug_cfa_expression (n);
2068 handled_one = true;
2069 break;
2071 case REG_CFA_RESTORE:
2072 n = XEXP (note, 0);
2073 if (n == NULL)
2075 n = PATTERN (insn);
2076 if (GET_CODE (n) == PARALLEL)
2077 n = XVECEXP (n, 0, 0);
2078 n = XEXP (n, 0);
2080 dwarf2out_frame_debug_cfa_restore (n);
2081 handled_one = true;
2082 break;
2084 case REG_CFA_SET_VDRAP:
2085 n = XEXP (note, 0);
2086 if (REG_P (n))
2088 dw_fde_ref fde = cfun->fde;
2089 if (fde)
2091 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2092 if (REG_P (n))
2093 fde->vdrap_reg = dwf_regno (n);
2096 handled_one = true;
2097 break;
2099 case REG_CFA_WINDOW_SAVE:
2100 dwarf2out_frame_debug_cfa_window_save ();
2101 handled_one = true;
2102 break;
2104 case REG_CFA_FLUSH_QUEUE:
2105 /* The actual flush happens elsewhere. */
2106 handled_one = true;
2107 break;
2109 default:
2110 break;
2113 if (!handled_one)
2115 pat = PATTERN (insn);
/* REG_FRAME_RELATED_EXPR jumps here with PAT set to its note operand.  */
2116 do_frame_expr:
2117 dwarf2out_frame_debug_expr (pat);
2119 /* Check again. A parallel can save and update the same register.
2120 We could probably check just once, here, but this is safer than
2121 removing the check at the start of the function. */
2122 if (clobbers_queued_reg_save (pat))
2123 dwarf2out_flush_queued_reg_saves ();
2127 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2129 static void
2130 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2132 size_t i, n_old, n_new, n_max;
2133 dw_cfi_ref cfi;
2135 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2136 add_cfi (new_row->cfa_cfi);
2137 else
2139 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2140 if (cfi)
2141 add_cfi (cfi);
2144 n_old = vec_safe_length (old_row->reg_save);
2145 n_new = vec_safe_length (new_row->reg_save);
2146 n_max = MAX (n_old, n_new);
2148 for (i = 0; i < n_max; ++i)
2150 dw_cfi_ref r_old = NULL, r_new = NULL;
2152 if (i < n_old)
2153 r_old = (*old_row->reg_save)[i];
2154 if (i < n_new)
2155 r_new = (*new_row->reg_save)[i];
2157 if (r_old == r_new)
2159 else if (r_new == NULL)
2160 add_cfi_restore (i);
2161 else if (!cfi_equal_p (r_old, r_new))
2162 add_cfi (r_new);
2166 /* Examine CFI and return true if a cfi label and set_loc is needed
2167 beforehand. Even when generating CFI assembler instructions, we
2168 still have to add the cfi to the list so that lookup_cfa_1 works
2169 later on. When -g2 and above we even need to force emitting of
2170 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2171 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2172 and so don't use convert_cfa_to_fb_loc_list. */
2174 static bool
2175 cfi_label_required_p (dw_cfi_ref cfi)
2177 if (!dwarf2out_do_cfi_asm ())
2178 return true;
2180 if (dwarf_version == 2
2181 && debug_info_level > DINFO_LEVEL_TERSE
2182 && (write_symbols == DWARF2_DEBUG
2183 || write_symbols == VMS_AND_DWARF2_DEBUG))
2185 switch (cfi->dw_cfi_opc)
2187 case DW_CFA_def_cfa_offset:
2188 case DW_CFA_def_cfa_offset_sf:
2189 case DW_CFA_def_cfa_register:
2190 case DW_CFA_def_cfa:
2191 case DW_CFA_def_cfa_sf:
2192 case DW_CFA_def_cfa_expression:
2193 case DW_CFA_restore_state:
2194 return true;
2195 default:
2196 return false;
2199 return false;
2202 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2203 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2204 necessary. */
2205 static void
2206 add_cfis_to_fde (void)
2208 dw_fde_ref fde = cfun->fde;
2209 rtx_insn *insn, *next;
2210 /* We always start with a function_begin label. */
2211 bool first = false;
2213 for (insn = get_insns (); insn; insn = next)
2215 next = NEXT_INSN (insn);
2217 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2219 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2220 /* Don't attempt to advance_loc4 between labels
2221 in different sections. */
2222 first = true;
2225 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2227 bool required = cfi_label_required_p (NOTE_CFI (insn));
/* Scan ahead through the run of consecutive CFI notes (stopping at the
   next active insn or section switch); if any member of the group needs
   a label, one label is emitted for the whole group.  */
2228 while (next)
2229 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2231 required |= cfi_label_required_p (NOTE_CFI (next));
2232 next = NEXT_INSN (next);
2234 else if (active_insn_p (next)
2235 || (NOTE_P (next) && (NOTE_KIND (next)
2236 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2237 break;
2238 else
2239 next = NEXT_INSN (next);
2240 if (required)
2242 int num = dwarf2out_cfi_label_num;
2243 const char *label = dwarf2out_cfi_label ();
2244 dw_cfi_ref xcfi;
2245 rtx tmp;
2247 /* Set the location counter to the new label. */
2248 xcfi = new_cfi ();
2249 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2250 : DW_CFA_advance_loc4);
2251 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2252 vec_safe_push (fde->dw_fde_cfi, xcfi);
2254 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2255 NOTE_LABEL_NUMBER (tmp) = num;
/* Copy the whole group of CFI notes into the FDE, up to the insn where
   the look-ahead stopped.  */
2260 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2261 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2262 insn = NEXT_INSN (insn);
2264 while (insn != next);
2265 first = false;
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.

   START is the label (or other save point) heading the target trace;
   ORIGIN is the branch/call insn creating the edge, or NULL for a
   fallthru edge (used only for dump output).  On the first arrival the
   target trace inherits the current row, args_size and register-save
   state and is queued on TRACE_WORK_LIST; on later arrivals the state
   must already match, except that a mismatched args_size merely marks
   the trace's args_size as unusable.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    fprintf (dump_file, "  saw edge from trace %u to %u (via %s %d)\n",
	     cur_trace->id, ti->id,
	     (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	     (origin ? INSN_UID (origin) : 0));

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      /* Deep-copy the register-save vector: the target trace will be
	 scanned independently and must not alias our state.  */
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {
      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.

   Such edges unwind any pending outgoing-argument pushes, so the
   target trace must see args_size == 0 and, when the CFA is based on
   the stack pointer, a CFA offset adjusted back by the popped amount.
   The adjustment is applied temporarily to CUR_ROW/CUR_TRACE around
   the call to maybe_record_trace_start and then restored, since the
   current trace continues with its original state.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      /* Nothing pushed; the normal path suffices.  */
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Restore the state of the current trace.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* A non-local goto is handled via the CALL_P path below (the
	 receiver side), not as a jump edge here.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per jump-table entry.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was taken.  */
	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
	    maybe_record_trace_start (lab->insn (), insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Plain (conditional) jump to a single label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse on each element; EH edges are
	 handled by the recursive calls, so return early.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.

   Record any frame-related effects of INSN (register saves, CFA
   changes) and then any REG_ARGS_SIZE note; the order matters, since
   both may queue CFI output at the current add_cfi_insn position.  */

static void
scan_insn_after (rtx_insn *insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.

   TRACE's end state is seeded from its beginning state, the global
   cursors (CUR_TRACE, CUR_ROW, CUR_CFA, ADD_CFI_INSN) are pointed at
   it, and each insn is processed until a barrier or the start of the
   next trace is reached.  Delay-slot SEQUENCEs need special care for
   where the notes are positioned, particularly for annulled branches
   where the slot's effects apply to only one of the two paths.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* Work on a local copy of the CFA so an annulled-branch slot can
     temporarily redirect CUR_CFA at the row without losing it.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo the slot's effects on the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the global cursors so stale state cannot leak into the
     next trace's scan.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2628 /* Scan the function and create the initial set of CFI notes. */
2630 static void
2631 create_cfi_notes (void)
2633 dw_trace_info *ti;
2635 gcc_checking_assert (!queued_reg_saves.exists ());
2636 gcc_checking_assert (!trace_work_list.exists ());
2638 /* Always begin at the entry trace. */
2639 ti = &trace_info[0];
2640 scan_trace (ti);
2642 while (!trace_work_list.is_empty ())
2644 ti = trace_work_list.pop ();
2645 scan_trace (ti);
2648 queued_reg_saves.release ();
2649 trace_work_list.release ();
2652 /* Return the insn before the first NOTE_INSN_CFI after START. */
2654 static rtx_insn *
2655 before_next_cfi_note (rtx_insn *start)
2657 rtx_insn *prev = start;
2658 while (start)
2660 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2661 return prev;
2662 prev = start;
2663 start = NEXT_INSN (start);
2665 gcc_unreachable ();
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  /* Iterate downward so ordered_remove does not disturb unvisited
     indices; trace 0 (the entry) is always processed.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After the restore, the row matches the previous trace's
		 *beginning*, so diff from there instead.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (section switch) restarts args_size at zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);
  /* NOTE(review): the safe_push above happens after TI was copied into
     trace_info, and TI's vec was NULL at copy time -- verify that the
     CIE return-save entry actually reaches trace 0's state.  */

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	saw_barrier = true;
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* We should have just seen a barrier.  */
	  gcc_assert (saw_barrier);
	  switch_sections = true;
	}
      /* Watch out for save_point notes between basic blocks.
	 In particular, a note after a barrier.  Do not record these,
	 delaying trace creation until the label.  */
      else if (save_point_p (insn)
	       && (LABEL_P (insn) || !saw_barrier))
	{
	  memset (&ti, 0, sizeof (ti));
	  ti.head = insn;
	  ti.switch_sections = switch_sections;
	  ti.id = trace_info.length ();
	  trace_info.safe_push (ti);

	  saw_barrier = false;
	  switch_sections = false;
	}
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
		 tp->switch_sections ? " (section switch)" : "");

      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.

   Three shapes are accepted: a REG (RA lives in a register), a MEM of
   SP or SP+/-const (RA lives on the stack), or a PLUS of one of the
   above with a constant (e.g. SPARC %i7+8) whose offset is ignored.
   If the location differs from DWARF_FRAME_RETURN_COLUMN, emit the
   appropriate DW_CFA_register/offset for the return column.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);

      /* The offset is CFA-relative; REG == INVALID_REGNUM means the
	 save is in memory at OFFSET from the CFA.  */
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
/* Build the common information entry (CIE) row state that every FDE
   begins with: CFA at SP + INCOMING_FRAME_SP_OFFSET, plus the initial
   location of the return address.  Runs once per translation unit;
   results are cached in CIE_CFI_VEC / CIE_CFI_ROW / CIE_RETURN_SAVE.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so initial_return_save's bookkeeping has
     somewhere to go without touching real function state.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Detach the global cursors from the scratch CIE state.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.

   Phases, in order: build one-time CIE data, allocate the FDE, build
   the trace pseudo-cfg, scan each trace to create CFI notes, patch up
   state between traces, attach the notes to the FDE, then free all
   per-function trace data.  Always returns 0 (no TODO flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3023 /* Convert a DWARF call frame info. operation to its string name */
3025 static const char *
3026 dwarf_cfi_name (unsigned int cfi_opc)
3028 const char *name = get_DW_CFA_name (cfi_opc);
3030 if (name != NULL)
3031 return name;
3033 return "DW_CFA_<unknown>";
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.

   FOR_EH selects the EH-frame register mapping.  DW_CFA_expression
   carries the register number in operand 1 and the location expression
   in operand 2; DW_CFA_def_cfa_expression has the expression in
   operand 1.  Output format: [reg] uleb128-size, then the expression
   bytes.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
/* Similar, but used for .cfi_escape.

   Emits the same byte sequence as output_cfa_loc but as a
   comma-separated list of raw values suitable as .cfi_escape operands;
   always uses the EH register mapping.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
/* Output a Call Frame Information opcode and its operand(s).

   CFI is the operation; FDE supplies (and receives updates of) the
   current label for advance_loc deltas; FOR_EH selects the EH-frame
   register mapping.  The three opcodes whose low six bits encode an
   operand (advance_loc, offset, restore) are handled specially; all
   others emit the opcode byte followed by their operands.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    /* Delta is packed into the opcode's low 6 bits.  */
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
/* Similar, but do it via assembler directives instead.

   F is either asm_out_file (emit real .cfi_* directives) or a dump
   stream, in which case pseudo-directives are printed for operations
   that have no exact directive form.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No directive exists; emit the raw opcode via .cfi_escape.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expressions also have no directive; escape the raw bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Emit CFI directly into the assembly output, but only when directive
   emission is in use; otherwise the FDE data is emitted later during
   final from the collected CFI vectors.  */

void
dwarf2out_emit_cfi (dw_cfi_ref cfi)
{
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
}
/* Dump the state of ROW to F as a sequence of .cfi_* style directives:
   the CFA definition followed by every recorded register save.  */

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      /* The CFA is not stored as an expression; synthesize a simple
	 def_cfa CFI from the row's location for printing.  */
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}
/* Entry point for use from the debugger: dump ROW to stderr.
   The preceding prototype keeps -Wmissing-prototypes quiet for this
   otherwise-uncalled external function.  */
void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;

/* Decide whether we want to emit frame unwind information for the current
   translation unit.  True when DWARF debug info is requested, when CFI
   asm emission has already been committed to, or when either the debug
   or exception unwind model for the target is UI_DWARF2.  */

bool
dwarf2out_do_frame (void)
{
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return true;

  /* dwarf2out_do_cfi_asm has already decided positively.  */
  if (saved_do_cfi_asm > 0)
    return true;

  if (targetm.debug_unwind_info () == UI_DWARF2)
    return true;

  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    return true;

  return false;
}
/* Decide whether to emit frame unwind via assembler directives.

   The answer is computed once and cached in SAVED_DO_CFI_ASM (tri-state,
   PCH-saved).  Requires -fdwarf2-cfi-asm, frame info being wanted at
   all, assembler support for .cfi_personality, and encodings the
   assembler can represent.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
namespace {

/* Pass descriptor: an RTL pass named "dwarf2", timed under TV_FINAL,
   with no property or TODO requirements.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The pass object; all work happens in execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Run only when the target has an RTL prologue and dwarf2 frame info
   is wanted.  */
bool
pass_dwarf2_frame::gate (function *)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
/* Factory for the pass manager; caller owns the returned pass.  */
rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3510 #include "gt-dwarf2cfi.h"