Daily bump.
[official-gcc.git] / gcc / dwarf2cfi.c
bloba5f9832fc4a85635684c2f2d75be70357d74d2ae
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

/* Targets that never take the INCOMING_RETURN_ADDR_RTX paths need not
   define it; trap at runtime if such a path is reached anyway.  */
#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  */
  cfi_vec reg_save;
};
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */

struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
};
/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */

struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
/* Hashtable helpers.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};

/* Hash a trace by the UID of its head insn; a trace is uniquely
   identified by the insn that starts it.  */

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}

/* Two traces are equal iff they begin at the same insn.  */

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info *> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* The return-address save recorded in the CIE, if any.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter used by dwarf2out_cfi_label to make unique "LCFI" labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
};

static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
225 /* Hook used by __throw. */
228 expand_builtin_dwarf_sp_column (void)
230 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
231 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
234 /* MEM is a memory reference for the register size table, each element of
235 which has mode MODE. Initialize column C as a return address column. */
237 static void
238 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
240 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
241 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
242 emit_move_insn (adjust_address (mem, mode, offset),
243 gen_int_mode (size, mode));
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];
};
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO as handled even if we bail out below, so the caller never
     hands it to us again (e.g. once as a span piece, once on its own).  */
  init_state->processed_regno[regno] = true;

  /* Columns past the end of the size table cannot be stored.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* A VOIDmode return column carries no size to record.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* Guard against an unrepresentable slot offset.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  /* The target splits this register over several DWARF columns;
	     record a size entry for each piece.  */
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Make sure the return column always has a size recorded.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target record any extra, target-specific entries.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
353 static dw_trace_info *
354 get_trace_info (rtx_insn *insn)
356 dw_trace_info dummy;
357 dummy.head = insn;
358 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
361 static bool
362 save_point_p (rtx_insn *insn)
364 /* Labels, except those that are really jump tables. */
365 if (LABEL_P (insn))
366 return inside_basic_block_p (insn);
368 /* We split traces at the prologue/epilogue notes because those
369 are points at which the unwind info is usually stable. This
370 makes it easier to find spots with identical unwind info so
371 that we can use remember/restore_state opcodes. */
372 if (NOTE_P (insn))
373 switch (NOTE_KIND (insn))
375 case NOTE_INSN_PROLOGUE_END:
376 case NOTE_INSN_EPILOGUE_BEG:
377 return true;
380 return false;
383 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
385 static inline HOST_WIDE_INT
386 div_data_align (HOST_WIDE_INT off)
388 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
389 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
390 return r;
393 /* Return true if we need a signed version of a given opcode
394 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
396 static inline bool
397 need_data_align_sf_opcode (HOST_WIDE_INT off)
399 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
402 /* Return a pointer to a newly allocated Call Frame Instruction. */
404 static inline dw_cfi_ref
405 new_cfi (void)
407 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
409 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
410 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
412 return cfi;
415 /* Return a newly allocated CFI row, with no defined data. */
417 static dw_cfi_row *
418 new_cfi_row (void)
420 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
422 row->cfa.reg = INVALID_REGNUM;
424 return row;
427 /* Return a copy of an existing CFI row. */
429 static dw_cfi_row *
430 copy_cfi_row (dw_cfi_row *src)
432 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
434 *dst = *src;
435 dst->reg_save = vec_safe_copy (src->reg_save);
437 return dst;
440 /* Generate a new label for the CFI info to refer to. */
442 static char *
443 dwarf2out_cfi_label (void)
445 int num = dwarf2out_cfi_label_num++;
446 char label[20];
448 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
450 return xstrdup (label);
453 /* Add CFI either to the current insn stream or to a vector, or both. */
455 static void
456 add_cfi (dw_cfi_ref cfi)
458 any_cfis_emitted = true;
460 if (add_cfi_insn != NULL)
462 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
463 NOTE_CFI (add_cfi_insn) = cfi;
466 if (add_cfi_vec != NULL)
467 vec_safe_push (*add_cfi_vec, cfi);
470 static void
471 add_cfi_args_size (HOST_WIDE_INT size)
473 dw_cfi_ref cfi = new_cfi ();
475 /* While we can occasionally have args_size < 0 internally, this state
476 should not persist at a point we actually need an opcode. */
477 gcc_assert (size >= 0);
479 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
480 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
482 add_cfi (cfi);
485 static void
486 add_cfi_restore (unsigned reg)
488 dw_cfi_ref cfi = new_cfi ();
490 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
491 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
493 add_cfi (cfi);
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand so COLUMN is a valid index; new slots
     are cleared, i.e. "not saved".  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  Only the simple register / register+offset forms
   (with an optional dereference) are handled; anything else aborts.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
  struct dw_loc_descr_node *ptr;
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	/* DW_OP_regN: the CFA is register N itself.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  cfa->reg = op - DW_OP_reg0;
	  break;
	case DW_OP_regx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	/* DW_OP_bregN: the CFA is computed as register N plus an offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	case DW_OP_deref:
	  cfa->indirect = 1;
	  break;
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  /* Other operators never appear in CFA expressions we build.  */
	  gcc_unreachable ();
	}
    }
}
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remembered state is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* All other opcodes leave the CFA unchanged.  */
      break;
    }
}
654 /* Determine if two dw_cfa_location structures define the same data. */
656 bool
657 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
659 return (loc1->reg == loc2->reg
660 && loc1->offset == loc2->offset
661 && loc1->indirect == loc2->indirect
662 && (loc1->indirect == 0
663 || loc1->base_offset == loc2->base_offset));
/* Determine if two CFI operands are identical, interpreting them
   according to the operand type T.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality first; fall back to string comparison.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
/* Determine if two CFI entries are identical.  Either argument may
   be null; two nulls compare equal.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* If A carries a full CFA expression, compare the expressions;
     otherwise compare the simple register+offset forms.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* Compare register saves column by column; a column missing from one
     vector is treated as "not saved" (NULL).  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the store base in sync when it tracks the CFA register.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the full expression only when one was needed.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Pick the most compact save-at-offset opcode that can represent
	 the (data-aligned) offset and the column number.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn of the trace: record it and seed both delayed
	 args_size values from the current true size.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
950 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
951 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
952 used in places where rtl is prohibited. */
954 static inline unsigned
955 dwf_regno (const_rtx reg)
957 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
958 return DWARF_FRAME_REGNUM (REGNO (reg));
961 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
963 static bool
964 compare_reg_or_pc (rtx x, rtx y)
966 if (REG_P (x) && REG_P (y))
967 return REGNO (x) == REGNO (y);
968 return x == y;
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or remove it in place.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Nothing to delete if there was no entry.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
/* Output all the entries in QUEUED_REG_SAVES, then empty the queue.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      /* A null saved_reg means "saved at cfa_offset", signalled to
	 reg_save by INVALID_REGNUM.  */
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      if (modified_in_p (q->reg, insn))
	return true;

      /* Also check the register that currently holds the queued
	 register's saved value.  */
      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
/* What register, if any, is currently saved in REG?  Returns the
   original register (or PC_RTX), or NULL_RTX if none.  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  /* Check pending (not yet emitted) saves first.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  /* Then the trace's recorded register-in-register saves.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.
   PAT has the shape REG, REG+CONST, MEM(REG), or MEM(REG+CONST)
   (optionally offset by an outer PLUS).  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel an outer constant offset, if present.  */
  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  /* A MEM makes the CFA indirect; peel its inner offset too.  */
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.
   PAT is a SET whose source is either the current CFA register or the
   CFA register plus a constant.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* dest = cfa_reg + const: the CFA moved away from the new
	 register by const, so the offset shrinks by it.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy; only the CFA register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
1153 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1155 static void
1156 dwarf2out_frame_debug_cfa_offset (rtx set)
1158 HOST_WIDE_INT offset;
1159 rtx src, addr, span;
1160 unsigned int sregno;
1162 src = XEXP (set, 1);
1163 addr = XEXP (set, 0);
1164 gcc_assert (MEM_P (addr));
1165 addr = XEXP (addr, 0);
1167 /* As documented, only consider extremely simple addresses. */
1168 switch (GET_CODE (addr))
1170 case REG:
1171 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1172 offset = -cur_cfa->offset;
1173 break;
1174 case PLUS:
1175 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1176 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1177 break;
1178 default:
1179 gcc_unreachable ();
1182 if (src == pc_rtx)
1184 span = NULL;
1185 sregno = DWARF_FRAME_RETURN_COLUMN;
1187 else
1189 span = targetm.dwarf_register_span (src);
1190 sregno = dwf_regno (src);
1193 /* ??? We'd like to use queue_reg_save, but we need to come up with
1194 a different flushing heuristic for epilogues. */
1195 if (!span)
1196 reg_save (sregno, INVALID_REGNUM, offset);
1197 else
1199 /* We have a PARALLEL describing where the contents of SRC live.
1200 Adjust the offset for each piece of the PARALLEL. */
1201 HOST_WIDE_INT span_offset = offset;
1203 gcc_assert (GET_CODE (span) == PARALLEL);
1205 const int par_len = XVECLEN (span, 0);
1206 for (int par_index = 0; par_index < par_len; par_index++)
1208 rtx elem = XVECEXP (span, 0, par_index);
1209 sregno = dwf_regno (src);
1210 reg_save (sregno, INVALID_REGNUM, span_offset);
1211 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1216 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1218 static void
1219 dwarf2out_frame_debug_cfa_register (rtx set)
1221 rtx src, dest;
1222 unsigned sregno, dregno;
1224 src = XEXP (set, 1);
1225 dest = XEXP (set, 0);
1227 record_reg_saved_in_reg (dest, src);
1228 if (src == pc_rtx)
1229 sregno = DWARF_FRAME_RETURN_COLUMN;
1230 else
1231 sregno = dwf_regno (src);
1233 dregno = dwf_regno (dest);
1235 /* ??? We'd like to use queue_reg_save, but we need to come up with
1236 a different flushing heuristic for epilogues. */
1237 reg_save (sregno, dregno, 0);
1240 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1242 static void
1243 dwarf2out_frame_debug_cfa_expression (rtx set)
1245 rtx src, dest, span;
1246 dw_cfi_ref cfi = new_cfi ();
1247 unsigned regno;
1249 dest = SET_DEST (set);
1250 src = SET_SRC (set);
1252 gcc_assert (REG_P (src));
1253 gcc_assert (MEM_P (dest));
1255 span = targetm.dwarf_register_span (src);
1256 gcc_assert (!span);
1258 regno = dwf_regno (src);
1260 cfi->dw_cfi_opc = DW_CFA_expression;
1261 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1262 cfi->dw_cfi_oprnd2.dw_cfi_loc
1263 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1264 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1266 /* ??? We'd like to use queue_reg_save, were the interface different,
1267 and, as above, we could manage flushing for epilogues. */
1268 add_cfi (cfi);
1269 update_row_reg_save (cur_row, regno, cfi);
1272 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1273 note. */
1275 static void
1276 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1278 rtx dest = SET_DEST (set);
1279 gcc_assert (REG_P (dest));
1281 rtx span = targetm.dwarf_register_span (dest);
1282 gcc_assert (!span);
1284 rtx src = SET_SRC (set);
1285 dw_cfi_ref cfi = new_cfi ();
1286 cfi->dw_cfi_opc = DW_CFA_val_expression;
1287 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1288 cfi->dw_cfi_oprnd2.dw_cfi_loc
1289 = mem_loc_descriptor (src, GET_MODE (src),
1290 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1291 add_cfi (cfi);
1292 update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1295 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1297 static void
1298 dwarf2out_frame_debug_cfa_restore (rtx reg)
1300 gcc_assert (REG_P (reg));
1302 rtx span = targetm.dwarf_register_span (reg);
1303 if (!span)
1305 unsigned int regno = dwf_regno (reg);
1306 add_cfi_restore (regno);
1307 update_row_reg_save (cur_row, regno, NULL);
1309 else
1311 /* We have a PARALLEL describing where the contents of REG live.
1312 Restore the register for each piece of the PARALLEL. */
1313 gcc_assert (GET_CODE (span) == PARALLEL);
1315 const int par_len = XVECLEN (span, 0);
1316 for (int par_index = 0; par_index < par_len; par_index++)
1318 reg = XVECEXP (span, 0, par_index);
1319 gcc_assert (REG_P (reg));
1320 unsigned int regno = dwf_regno (reg);
1321 add_cfi_restore (regno);
1322 update_row_reg_save (cur_row, regno, NULL);
1327 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1328 ??? Perhaps we should note in the CIE where windows are saved (instead of
1329 assuming 0(cfa)) and what registers are in the window. */
1331 static void
1332 dwarf2out_frame_debug_cfa_window_save (void)
1334 dw_cfi_ref cfi = new_cfi ();
1336 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1337 add_cfi (cfi);
1340 /* Record call frame debugging information for an expression EXPR,
1341 which either sets SP or FP (adjusting how we calculate the frame
1342 address) or saves a register to the stack or another register.
1343 LABEL indicates the address of EXPR.
1345 This function encodes a state machine mapping rtxes to actions on
1346 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1347 users need not read the source code.
1349 The High-Level Picture
1351 Changes in the register we use to calculate the CFA: Currently we
1352 assume that if you copy the CFA register into another register, we
1353 should take the other one as the new CFA register; this seems to
1354 work pretty well. If it's wrong for some target, it's simple
1355 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1357 Changes in the register we use for saving registers to the stack:
1358 This is usually SP, but not always. Again, we deduce that if you
1359 copy SP into another register (and SP is not the CFA register),
1360 then the new register is the one we will be using for register
1361 saves. This also seems to work.
1363 Register saves: There's not much guesswork about this one; if
1364 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1365 register save, and the register used to calculate the destination
1366 had better be the one we think we're using for this purpose.
1367 It's also assumed that a copy from a call-saved register to another
1368 register is saving that register if RTX_FRAME_RELATED_P is set on
1369 that instruction. If the copy is from a call-saved register to
1370 the *same* register, that means that the register is now the same
1371 value as in the caller.
1373 Except: If the register being saved is the CFA register, and the
1374 offset is nonzero, we are saving the CFA, so we assume we have to
1375 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1376 the intent is to save the value of SP from the previous frame.
1378 In addition, if a register has previously been saved to a different
1379 register,
1381 Invariants / Summaries of Rules
1383 cfa current rule for calculating the CFA. It usually
1384 consists of a register and an offset. This is
1385 actually stored in *cur_cfa, but abbreviated
1386 for the purposes of this documentation.
1387 cfa_store register used by prologue code to save things to the stack
1388 cfa_store.offset is the offset from the value of
1389 cfa_store.reg to the actual CFA
1390 cfa_temp register holding an integral value. cfa_temp.offset
1391 stores the value, which will be used to adjust the
1392 stack pointer. cfa_temp is also used like cfa_store,
1393 to track stores to the stack via fp or a temp reg.
1395 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1396 with cfa.reg as the first operand changes the cfa.reg and its
1397 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1398 cfa_temp.offset.
1400 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1401 expression yielding a constant. This sets cfa_temp.reg
1402 and cfa_temp.offset.
1404 Rule 5: Create a new register cfa_store used to save items to the
1405 stack.
1407 Rules 10-14: Save a register to the stack. Define offset as the
1408 difference of the original location and cfa_store's
1409 location (or cfa_temp's location if cfa_temp is used).
1411 Rules 16-20: If AND operation happens on sp in prologue, we assume
1412 stack is realigned. We will use a group of DW_OP_XXX
1413 expressions to represent the location of the stored
1414 register instead of CFA+offset.
1416 The Rules
1418 "{a,b}" indicates a choice of a xor b.
1419 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1421 Rule 1:
1422 (set <reg1> <reg2>:cfa.reg)
1423 effects: cfa.reg = <reg1>
1424 cfa.offset unchanged
1425 cfa_temp.reg = <reg1>
1426 cfa_temp.offset = cfa.offset
1428 Rule 2:
1429 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1430 {<const_int>,<reg>:cfa_temp.reg}))
1431 effects: cfa.reg = sp if fp used
1432 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1433 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1434 if cfa_store.reg==sp
1436 Rule 3:
1437 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1438 effects: cfa.reg = fp
1439 cfa_offset += +/- <const_int>
1441 Rule 4:
1442 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1443 constraints: <reg1> != fp
1444 <reg1> != sp
1445 effects: cfa.reg = <reg1>
1446 cfa_temp.reg = <reg1>
1447 cfa_temp.offset = cfa.offset
1449 Rule 5:
1450 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1451 constraints: <reg1> != fp
1452 <reg1> != sp
1453 effects: cfa_store.reg = <reg1>
1454 cfa_store.offset = cfa.offset - cfa_temp.offset
1456 Rule 6:
1457 (set <reg> <const_int>)
1458 effects: cfa_temp.reg = <reg>
1459 cfa_temp.offset = <const_int>
1461 Rule 7:
1462 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1463 effects: cfa_temp.reg = <reg1>
1464 cfa_temp.offset |= <const_int>
1466 Rule 8:
1467 (set <reg> (high <exp>))
1468 effects: none
1470 Rule 9:
1471 (set <reg> (lo_sum <exp> <const_int>))
1472 effects: cfa_temp.reg = <reg>
1473 cfa_temp.offset = <const_int>
1475 Rule 10:
1476 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1477 effects: cfa_store.offset -= <const_int>
1478 cfa.offset = cfa_store.offset if cfa.reg == sp
1479 cfa.reg = sp
1480 cfa.base_offset = -cfa_store.offset
1482 Rule 11:
1483 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1484 effects: cfa_store.offset += -/+ mode_size(mem)
1485 cfa.offset = cfa_store.offset if cfa.reg == sp
1486 cfa.reg = sp
1487 cfa.base_offset = -cfa_store.offset
1489 Rule 12:
1490 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1492 <reg2>)
1493 effects: cfa.reg = <reg1>
1494 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1496 Rule 13:
1497 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1498 effects: cfa.reg = <reg1>
1499 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1501 Rule 14:
1502 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1503 effects: cfa.reg = <reg1>
1504 cfa.base_offset = -cfa_temp.offset
1505 cfa_temp.offset -= mode_size(mem)
1507 Rule 15:
1508 (set <reg> {unspec, unspec_volatile})
1509 effects: target-dependent
1511 Rule 16:
1512 (set sp (and: sp <const_int>))
1513 constraints: cfa_store.reg == sp
1514 effects: cfun->fde.stack_realign = 1
1515 cfa_store.offset = 0
1516 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1518 Rule 17:
1519 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1520 effects: cfa_store.offset += -/+ mode_size(mem)
1522 Rule 18:
1523 (set (mem ({pre_inc, pre_dec} sp)) fp)
1524 constraints: fde->stack_realign == 1
1525 effects: cfa_store.offset = 0
1526 cfa.reg != HARD_FRAME_POINTER_REGNUM
1528 Rule 19:
1529 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1530 constraints: fde->stack_realign == 1
1531 && cfa.offset == 0
1532 && cfa.indirect == 0
1533 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1534 effects: Use DW_CFA_def_cfa_expression to define cfa
1535 cfa.reg == fde->drap_reg */
1537 static void
1538 dwarf2out_frame_debug_expr (rtx expr)
1540 rtx src, dest, span;
1541 HOST_WIDE_INT offset;
1542 dw_fde_ref fde;
1544 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1545 the PARALLEL independently. The first element is always processed if
1546 it is a SET. This is for backward compatibility. Other elements
1547 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1548 flag is set in them. */
1549 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1551 int par_index;
1552 int limit = XVECLEN (expr, 0);
1553 rtx elem;
1555 /* PARALLELs have strict read-modify-write semantics, so we
1556 ought to evaluate every rvalue before changing any lvalue.
1557 It's cumbersome to do that in general, but there's an
1558 easy approximation that is enough for all current users:
1559 handle register saves before register assignments. */
1560 if (GET_CODE (expr) == PARALLEL)
1561 for (par_index = 0; par_index < limit; par_index++)
1563 elem = XVECEXP (expr, 0, par_index)
1564 if (GET_CODE (elem) == SET
1565 && MEM_P (SET_DEST (elem))
1566 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1567 dwarf2out_frame_debug_expr (elem);
/* Second pass: the register assignments (non-MEM destinations).  */
1570 for (par_index = 0; par_index < limit; par_index++)
1572 elem = XVECEXP (expr, 0, par_index);
1573 if (GET_CODE (elem) == SET
1574 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1575 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1576 dwarf2out_frame_debug_expr (elem);
1578 return;
/* A single SET: decode it with the rule machine documented above.  */
1581 gcc_assert (GET_CODE (expr) == SET);
1583 src = SET_SRC (expr);
1584 dest = SET_DEST (expr);
/* If SRC was previously saved into another register, analyze the insn
   as if the originally-saved register were being stored.  */
1586 if (REG_P (src))
1588 rtx rsi = reg_saved_in (src);
1589 if (rsi)
1590 src = rsi;
1593 fde = cfun->fde;
/* Dispatch on the destination: REG updates CFA bookkeeping, MEM is a
   register save to the stack.  */
1595 switch (GET_CODE (dest))
1597 case REG:
1598 switch (GET_CODE (src))
1600 /* Setting FP from SP. */
1601 case REG:
1602 if (cur_cfa->reg == dwf_regno (src))
1604 /* Rule 1 */
1605 /* Update the CFA rule wrt SP or FP. Make sure src is
1606 relative to the current CFA register.
1608 We used to require that dest be either SP or FP, but the
1609 ARM copies SP to a temporary register, and from there to
1610 FP. So we just rely on the backends to only set
1611 RTX_FRAME_RELATED_P on appropriate insns. */
1612 cur_cfa->reg = dwf_regno (dest);
1613 cur_trace->cfa_temp.reg = cur_cfa->reg;
1614 cur_trace->cfa_temp.offset = cur_cfa->offset;
1616 else
1618 /* Saving a register in a register. */
1619 gcc_assert (!fixed_regs [REGNO (dest)]
1620 /* For the SPARC and its register window. */
1621 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1623 /* After stack is aligned, we can only save SP in FP
1624 if drap register is used. In this case, we have
1625 to restore stack pointer with the CFA value and we
1626 don't generate this DWARF information. */
1627 if (fde
1628 && fde->stack_realign
1629 && REGNO (src) == STACK_POINTER_REGNUM)
1630 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1631 && fde->drap_reg != INVALID_REGNUM
1632 && cur_cfa->reg != dwf_regno (src));
1633 else
1634 queue_reg_save (src, dest, 0);
1636 break;
1638 case PLUS:
1639 case MINUS:
1640 case LO_SUM:
1641 if (dest == stack_pointer_rtx)
1643 /* Rule 2 */
1644 /* Adjusting SP. */
1645 switch (GET_CODE (XEXP (src, 1)))
1647 case CONST_INT:
1648 offset = INTVAL (XEXP (src, 1));
1649 break;
1650 case REG:
1651 gcc_assert (dwf_regno (XEXP (src, 1))
1652 == cur_trace->cfa_temp.reg);
1653 offset = cur_trace->cfa_temp.offset;
1654 break;
1655 default:
1656 gcc_unreachable ();
1659 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1661 /* Restoring SP from FP in the epilogue. */
1662 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1663 cur_cfa->reg = dw_stack_pointer_regnum;
1665 else if (GET_CODE (src) == LO_SUM)
1666 /* Assume we've set the source reg of the LO_SUM from sp. */
1668 else
1669 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1671 if (GET_CODE (src) != MINUS)
1672 offset = -offset;
1673 if (cur_cfa->reg == dw_stack_pointer_regnum)
1674 cur_cfa->offset += offset;
1675 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1676 cur_trace->cfa_store.offset += offset;
1678 else if (dest == hard_frame_pointer_rtx)
1680 /* Rule 3 */
1681 /* Either setting the FP from an offset of the SP,
1682 or adjusting the FP */
1683 gcc_assert (frame_pointer_needed);
1685 gcc_assert (REG_P (XEXP (src, 0))
1686 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1687 && CONST_INT_P (XEXP (src, 1)));
1688 offset = INTVAL (XEXP (src, 1));
1689 if (GET_CODE (src) != MINUS)
1690 offset = -offset;
1691 cur_cfa->offset += offset;
1692 cur_cfa->reg = dw_frame_pointer_regnum;
1694 else
1696 gcc_assert (GET_CODE (src) != MINUS);
1698 /* Rule 4 */
1699 if (REG_P (XEXP (src, 0))
1700 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1701 && CONST_INT_P (XEXP (src, 1)))
1703 /* Setting a temporary CFA register that will be copied
1704 into the FP later on. */
1705 offset = - INTVAL (XEXP (src, 1));
1706 cur_cfa->offset += offset;
1707 cur_cfa->reg = dwf_regno (dest);
1708 /* Or used to save regs to the stack. */
1709 cur_trace->cfa_temp.reg = cur_cfa->reg;
1710 cur_trace->cfa_temp.offset = cur_cfa->offset;
1713 /* Rule 5 */
1714 else if (REG_P (XEXP (src, 0))
1715 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1716 && XEXP (src, 1) == stack_pointer_rtx)
1718 /* Setting a scratch register that we will use instead
1719 of SP for saving registers to the stack. */
1720 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1721 cur_trace->cfa_store.reg = dwf_regno (dest);
1722 cur_trace->cfa_store.offset
1723 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1726 /* Rule 9 */
1727 else if (GET_CODE (src) == LO_SUM
1728 && CONST_INT_P (XEXP (src, 1)))
1730 cur_trace->cfa_temp.reg = dwf_regno (dest);
1731 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1733 else
1734 gcc_unreachable ();
1736 break;
1738 /* Rule 6 */
1739 case CONST_INT:
1740 cur_trace->cfa_temp.reg = dwf_regno (dest);
1741 cur_trace->cfa_temp.offset = INTVAL (src);
1742 break;
1744 /* Rule 7 */
1745 case IOR:
1746 gcc_assert (REG_P (XEXP (src, 0))
1747 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1748 && CONST_INT_P (XEXP (src, 1)));
1750 cur_trace->cfa_temp.reg = dwf_regno (dest);
1751 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1752 break;
1754 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1755 which will fill in all of the bits. */
1756 /* Rule 8 */
1757 case HIGH:
1758 break;
1760 /* Rule 15 */
1761 case UNSPEC:
1762 case UNSPEC_VOLATILE:
1763 /* All unspecs should be represented by REG_CFA_* notes. */
1764 gcc_unreachable ();
1765 return;
1767 /* Rule 16 */
1768 case AND:
1769 /* If this AND operation happens on stack pointer in prologue,
1770 we assume the stack is realigned and we extract the
1771 alignment. */
1772 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1774 /* We interpret reg_save differently with stack_realign set.
1775 Thus we must flush whatever we have queued first. */
1776 dwarf2out_flush_queued_reg_saves ();
1778 gcc_assert (cur_trace->cfa_store.reg
1779 == dwf_regno (XEXP (src, 0)));
1780 fde->stack_realign = 1;
1781 fde->stack_realignment = INTVAL (XEXP (src, 1));
1782 cur_trace->cfa_store.offset = 0;
1784 if (cur_cfa->reg != dw_stack_pointer_regnum
1785 && cur_cfa->reg != dw_frame_pointer_regnum)
1786 fde->drap_reg = cur_cfa->reg;
1788 return;
1790 default:
1791 gcc_unreachable ();
1793 break;
1795 case MEM:
1797 /* Saving a register to the stack. Make sure dest is relative to the
1798 CFA register. */
1799 switch (GET_CODE (XEXP (dest, 0)))
1801 /* Rule 10 */
1802 /* With a push. */
1803 case PRE_MODIFY:
1804 case POST_MODIFY:
1805 /* We can't handle variable size modifications. */
1806 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1807 == CONST_INT);
1808 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1810 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1811 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1813 cur_trace->cfa_store.offset += offset;
1814 if (cur_cfa->reg == dw_stack_pointer_regnum)
1815 cur_cfa->offset = cur_trace->cfa_store.offset;
1817 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1818 offset -= cur_trace->cfa_store.offset;
1819 else
1820 offset = -cur_trace->cfa_store.offset;
1821 break;
1823 /* Rule 11 */
1824 case PRE_INC:
1825 case PRE_DEC:
1826 case POST_DEC:
1827 offset = GET_MODE_SIZE (GET_MODE (dest));
1828 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1829 offset = -offset;
1831 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1832 == STACK_POINTER_REGNUM)
1833 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1835 cur_trace->cfa_store.offset += offset;
1837 /* Rule 18: If stack is aligned, we will use FP as a
1838 reference to represent the address of the stored
1839 regiser. */
1840 if (fde
1841 && fde->stack_realign
1842 && REG_P (src)
1843 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1845 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1846 cur_trace->cfa_store.offset = 0;
1849 if (cur_cfa->reg == dw_stack_pointer_regnum)
1850 cur_cfa->offset = cur_trace->cfa_store.offset;
1852 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1853 offset += -cur_trace->cfa_store.offset;
1854 else
1855 offset = -cur_trace->cfa_store.offset;
1856 break;
1858 /* Rule 12 */
1859 /* With an offset. */
1860 case PLUS:
1861 case MINUS:
1862 case LO_SUM:
1864 unsigned int regno;
1866 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1867 && REG_P (XEXP (XEXP (dest, 0), 0)));
1868 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1869 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1870 offset = -offset;
1872 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1874 if (cur_cfa->reg == regno)
1875 offset -= cur_cfa->offset;
1876 else if (cur_trace->cfa_store.reg == regno)
1877 offset -= cur_trace->cfa_store.offset;
1878 else
1880 gcc_assert (cur_trace->cfa_temp.reg == regno);
1881 offset -= cur_trace->cfa_temp.offset;
1884 break;
1886 /* Rule 13 */
1887 /* Without an offset. */
1888 case REG:
1890 unsigned int regno = dwf_regno (XEXP (dest, 0));
1892 if (cur_cfa->reg == regno)
1893 offset = -cur_cfa->offset;
1894 else if (cur_trace->cfa_store.reg == regno)
1895 offset = -cur_trace->cfa_store.offset;
1896 else
1898 gcc_assert (cur_trace->cfa_temp.reg == regno);
1899 offset = -cur_trace->cfa_temp.offset;
1902 break;
1904 /* Rule 14 */
1905 case POST_INC:
1906 gcc_assert (cur_trace->cfa_temp.reg
1907 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1908 offset = -cur_trace->cfa_temp.offset;
1909 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1910 break;
1912 default:
1913 gcc_unreachable ();
1916 /* Rule 17 */
1917 /* If the source operand of this MEM operation is a memory,
1918 we only care how much stack grew. */
1919 if (MEM_P (src))
1920 break;
1922 if (REG_P (src)
1923 && REGNO (src) != STACK_POINTER_REGNUM
1924 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1925 && dwf_regno (src) == cur_cfa->reg)
1927 /* We're storing the current CFA reg into the stack. */
1929 if (cur_cfa->offset == 0)
1931 /* Rule 19 */
1932 /* If stack is aligned, putting CFA reg into stack means
1933 we can no longer use reg + offset to represent CFA.
1934 Here we use DW_CFA_def_cfa_expression instead. The
1935 result of this expression equals to the original CFA
1936 value. */
1937 if (fde
1938 && fde->stack_realign
1939 && cur_cfa->indirect == 0
1940 && cur_cfa->reg != dw_frame_pointer_regnum)
1942 gcc_assert (fde->drap_reg == cur_cfa->reg);
1944 cur_cfa->indirect = 1;
1945 cur_cfa->reg = dw_frame_pointer_regnum;
1946 cur_cfa->base_offset = offset;
1947 cur_cfa->offset = 0;
1949 fde->drap_reg_saved = 1;
1950 break;
1953 /* If the source register is exactly the CFA, assume
1954 we're saving SP like any other register; this happens
1955 on the ARM. */
1956 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1957 break;
1959 else
1961 /* Otherwise, we'll need to look in the stack to
1962 calculate the CFA. */
1963 rtx x = XEXP (dest, 0);
1965 if (!REG_P (x))
1966 x = XEXP (x, 0);
1967 gcc_assert (REG_P (x));
1969 cur_cfa->reg = dwf_regno (x);
1970 cur_cfa->base_offset = offset;
1971 cur_cfa->indirect = 1;
1972 break;
/* Queue the register save, one entry per piece if the target splits
   SRC across several DWARF registers.  */
1976 if (REG_P (src))
1977 span = targetm.dwarf_register_span (src);
1978 else
1979 span = NULL;
1981 if (!span)
1982 queue_reg_save (src, NULL_RTX, offset);
1983 else
1985 /* We have a PARALLEL describing where the contents of SRC live.
1986 Queue register saves for each piece of the PARALLEL. */
1987 HOST_WIDE_INT span_offset = offset;
1989 gcc_assert (GET_CODE (span) == PARALLEL);
1991 const int par_len = XVECLEN (span, 0);
1992 for (int par_index = 0; par_index < par_len; par_index++)
1994 rtx elem = XVECEXP (span, 0, par_index);
1995 queue_reg_save (elem, NULL_RTX, span_offset);
1996 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1999 break;
2001 default:
2002 gcc_unreachable ();
2006 /* Record call frame debugging information for INSN, which either sets
2007 SP or FP (adjusting how we calculate the frame address) or saves a
2008 register to the stack. */
2010 static void
2011 dwarf2out_frame_debug (rtx_insn *insn)
2013 rtx note, n, pat;
2014 bool handled_one = false;
/* First look for REG_CFA_* notes; any such note supersedes the
   generic pattern analysis at the bottom of the function.  */
2016 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2017 switch (REG_NOTE_KIND (note))
2019 case REG_FRAME_RELATED_EXPR:
2020 pat = XEXP (note, 0);
2021 goto do_frame_expr;
2023 case REG_CFA_DEF_CFA:
2024 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2025 handled_one = true;
2026 break;
2028 case REG_CFA_ADJUST_CFA:
2029 n = XEXP (note, 0);
/* A null note operand means: use the insn's own pattern (or its
   first element, for a PARALLEL).  The same convention applies to
   the other REG_CFA_* cases below.  */
2030 if (n == NULL)
2032 n = PATTERN (insn);
2033 if (GET_CODE (n) == PARALLEL)
2034 n = XVECEXP (n, 0, 0);
2036 dwarf2out_frame_debug_adjust_cfa (n);
2037 handled_one = true;
2038 break;
2040 case REG_CFA_OFFSET:
2041 n = XEXP (note, 0);
2042 if (n == NULL)
2043 n = single_set (insn);
2044 dwarf2out_frame_debug_cfa_offset (n);
2045 handled_one = true;
2046 break;
2048 case REG_CFA_REGISTER:
2049 n = XEXP (note, 0);
2050 if (n == NULL)
2052 n = PATTERN (insn);
2053 if (GET_CODE (n) == PARALLEL)
2054 n = XVECEXP (n, 0, 0);
2056 dwarf2out_frame_debug_cfa_register (n);
2057 handled_one = true;
2058 break;
2060 case REG_CFA_EXPRESSION:
2061 case REG_CFA_VAL_EXPRESSION:
2062 n = XEXP (note, 0);
2063 if (n == NULL)
2064 n = single_set (insn);
2066 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2067 dwarf2out_frame_debug_cfa_expression (n);
2068 else
2069 dwarf2out_frame_debug_cfa_val_expression (n);
2071 handled_one = true;
2072 break;
2074 case REG_CFA_RESTORE:
2075 n = XEXP (note, 0);
2076 if (n == NULL)
2078 n = PATTERN (insn);
2079 if (GET_CODE (n) == PARALLEL)
2080 n = XVECEXP (n, 0, 0);
2081 n = XEXP (n, 0);
2083 dwarf2out_frame_debug_cfa_restore (n);
2084 handled_one = true;
2085 break;
2087 case REG_CFA_SET_VDRAP:
2088 n = XEXP (note, 0);
2089 if (REG_P (n))
2091 dw_fde_ref fde = cfun->fde;
2092 if (fde)
2094 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2095 if (REG_P (n))
2096 fde->vdrap_reg = dwf_regno (n);
2099 handled_one = true;
2100 break;
2102 case REG_CFA_TOGGLE_RA_MANGLE:
2103 case REG_CFA_WINDOW_SAVE:
2104 /* We overload both of these operations onto the same DWARF opcode. */
2105 dwarf2out_frame_debug_cfa_window_save ();
2106 handled_one = true;
2107 break;
2109 case REG_CFA_FLUSH_QUEUE:
2110 /* The actual flush happens elsewhere. */
2111 handled_one = true;
2112 break;
2114 default:
2115 break;
/* No note handled the insn: interpret its pattern directly with the
   rule machine.  REG_FRAME_RELATED_EXPR jumps here with PAT set.  */
2118 if (!handled_one)
2120 pat = PATTERN (insn);
2121 do_frame_expr:
2122 dwarf2out_frame_debug_expr (pat);
2124 /* Check again. A parallel can save and update the same register.
2125 We could probably check just once, here, but this is safer than
2126 removing the check at the start of the function. */
2127 if (clobbers_queued_reg_save (pat))
2128 dwarf2out_flush_queued_reg_saves ();
2132 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2134 static void
2135 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2137 size_t i, n_old, n_new, n_max;
2138 dw_cfi_ref cfi;
2140 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2141 add_cfi (new_row->cfa_cfi);
2142 else
2144 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2145 if (cfi)
2146 add_cfi (cfi);
2149 n_old = vec_safe_length (old_row->reg_save);
2150 n_new = vec_safe_length (new_row->reg_save);
2151 n_max = MAX (n_old, n_new);
2153 for (i = 0; i < n_max; ++i)
2155 dw_cfi_ref r_old = NULL, r_new = NULL;
2157 if (i < n_old)
2158 r_old = (*old_row->reg_save)[i];
2159 if (i < n_new)
2160 r_new = (*new_row->reg_save)[i];
2162 if (r_old == r_new)
2164 else if (r_new == NULL)
2165 add_cfi_restore (i);
2166 else if (!cfi_equal_p (r_old, r_new))
2167 add_cfi (r_new);
2171 /* Examine CFI and return true if a cfi label and set_loc is needed
2172 beforehand. Even when generating CFI assembler instructions, we
2173 still have to add the cfi to the list so that lookup_cfa_1 works
2174 later on. When -g2 and above we even need to force emitting of
2175 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2176 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2177 and so don't use convert_cfa_to_fb_loc_list. */
2179 static bool
2180 cfi_label_required_p (dw_cfi_ref cfi)
2182 if (!dwarf2out_do_cfi_asm ())
2183 return true;
2185 if (dwarf_version == 2
2186 && debug_info_level > DINFO_LEVEL_TERSE
2187 && (write_symbols == DWARF2_DEBUG
2188 || write_symbols == VMS_AND_DWARF2_DEBUG))
2190 switch (cfi->dw_cfi_opc)
2192 case DW_CFA_def_cfa_offset:
2193 case DW_CFA_def_cfa_offset_sf:
2194 case DW_CFA_def_cfa_register:
2195 case DW_CFA_def_cfa:
2196 case DW_CFA_def_cfa_sf:
2197 case DW_CFA_def_cfa_expression:
2198 case DW_CFA_restore_state:
2199 return true;
2200 default:
2201 return false;
2204 return false;
2207 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2208 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2209 necessary. */
2210 static void
2211 add_cfis_to_fde (void)
2213 dw_fde_ref fde = cfun->fde;
2214 rtx_insn *insn, *next;
2215 /* We always start with a function_begin label. */
2216 bool first = false;
/* FIRST becomes true after a section switch, forcing a DW_CFA_set_loc
   instead of an advance_loc4 (which cannot cross sections).  */
2218 for (insn = get_insns (); insn; insn = next)
2220 next = NEXT_INSN (insn);
2222 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2224 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2225 /* Don't attempt to advance_loc4 between labels
2226 in different sections. */
2227 first = true;
2230 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2232 bool required = cfi_label_required_p (NOTE_CFI (insn));
/* Look ahead: coalesce consecutive CFI notes so one label covers the
   whole run, stopping at the next active insn or section switch.  */
2233 while (next)
2234 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2236 required |= cfi_label_required_p (NOTE_CFI (next));
2237 next = NEXT_INSN (next);
2239 else if (active_insn_p (next)
2240 || (NOTE_P (next) && (NOTE_KIND (next)
2241 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2242 break;
2243 else
2244 next = NEXT_INSN (next);
2245 if (required)
2247 int num = dwarf2out_cfi_label_num;
2248 const char *label = dwarf2out_cfi_label ();
2249 dw_cfi_ref xcfi;
2251 /* Set the location counter to the new label. */
2252 xcfi = new_cfi ();
2253 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2254 : DW_CFA_advance_loc4);
2255 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2256 vec_safe_push (fde->dw_fde_cfi, xcfi);
2258 rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2259 NOTE_LABEL_NUMBER (tmp) = num;
/* Push the whole run of CFI notes (up to NEXT) into the FDE.  */
2264 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2265 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2266 insn = NEXT_INSN (insn);
2268 while (insn != next);
2269 first = false;
2274 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2276 /* If LABEL is the start of a trace, then initialize the state of that
2277 trace from CUR_TRACE and CUR_ROW. */
2279 static void
2280 maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
2282 dw_trace_info *ti;
2283 HOST_WIDE_INT args_size;
2285 ti = get_trace_info (start);
2286 gcc_assert (ti != NULL);
2288 if (dump_file)
2290 fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
2291 cur_trace->id, ti->id,
2292 (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2293 (origin ? INSN_UID (origin) : 0));
2296 args_size = cur_trace->end_true_args_size;
2297 if (ti->beg_row == NULL)
2299 /* This is the first time we've encountered this trace. Propagate
2300 state across the edge and push the trace onto the work list. */
2301 ti->beg_row = copy_cfi_row (cur_row);
2302 ti->beg_true_args_size = args_size;
2304 ti->cfa_store = cur_trace->cfa_store;
2305 ti->cfa_temp = cur_trace->cfa_temp;
2306 ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
2308 trace_work_list.safe_push (ti);
2310 if (dump_file)
2311 fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2313 else
2316 /* We ought to have the same state incoming to a given trace no
2317 matter how we arrive at the trace. Anything else means we've
2318 got some kind of optimization error. */
/* Checking builds verify the invariant and dump both rows on
   mismatch before aborting.  */
2319 #if CHECKING_P
2320 if (!cfi_row_equal_p (cur_row, ti->beg_row))
2322 if (dump_file)
2324 fprintf (dump_file, "Inconsistent CFI state!\n");
2325 fprintf (dump_file, "SHOULD have:\n");
2326 dump_cfi_row (dump_file, ti->beg_row);
2327 fprintf (dump_file, "DO have:\n");
2328 dump_cfi_row (dump_file, cur_row);
2331 gcc_unreachable ();
2333 #endif
2335 /* The args_size is allowed to conflict if it isn't actually used. */
2336 if (ti->beg_true_args_size != args_size)
2337 ti->args_size_undefined = true;
2341 /* Similarly, but handle the args_size and CFA reset across EH
2342 and non-local goto edges.  */
2344 static void
2345 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2347 HOST_WIDE_INT save_args_size, delta;
2348 dw_cfa_location save_cfa;
2350 save_args_size = cur_trace->end_true_args_size;
2351 if (save_args_size == 0)
/* No pending argument pushes: the abnormal edge needs no adjustment.  */
2353 maybe_record_trace_start (start, origin);
2354 return;
2357 delta = -save_args_size;
2358 cur_trace->end_true_args_size = 0;
2360 save_cfa = cur_row->cfa;
2361 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2363 /* Convert a change in args_size (always a positive in the
2364 direction of stack growth) to a change in stack pointer.  */
2365 if (!STACK_GROWS_DOWNWARD)
2366 delta = -delta;
2368 cur_row->cfa.offset += delta;
2371 maybe_record_trace_start (start, origin);
/* Undo the temporary adjustment: it applied only to the abnormal edge,
   not to the fallthru continuation of the current trace.  */
2373 cur_trace->end_true_args_size = save_args_size;
2374 cur_row->cfa = save_cfa;
2377 /* Propagate CUR_TRACE state to the destinations implied by INSN.  */
2378 /* ??? Sadly, this is in large part a duplicate of make_edges.  */
2380 static void
2381 create_trace_edges (rtx_insn *insn)
2383 rtx tmp;
2384 int i, n;
2386 if (JUMP_P (insn))
2388 rtx_jump_table_data *table;
/* A jump with REG_NON_LOCAL_GOTO is handled via the non-local goto
   handler-label list on the CALL_P path, not here.  */
2390 if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2391 return;
2393 if (tablejump_p (insn, NULL, &table))
2395 rtvec vec = table->get_labels ();
2397 n = GET_NUM_ELEM (vec);
2398 for (i = 0; i < n; ++i)
2400 rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
2401 maybe_record_trace_start (lab, insn);
2404 else if (computed_jump_p (insn))
/* A computed jump may reach any label whose address was taken.  */
2406 rtx_insn *temp;
2407 unsigned int i;
2408 FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
2409 maybe_record_trace_start (temp, insn);
2411 else if (returnjump_p (insn))
2413 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2415 n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2416 for (i = 0; i < n; ++i)
2418 rtx_insn *lab =
2419 as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
2420 maybe_record_trace_start (lab, insn);
2423 else
2425 rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
2426 gcc_assert (lab != NULL);
2427 maybe_record_trace_start (lab, insn);
2430 else if (CALL_P (insn))
2432 /* Sibling calls don't have edges inside this function.  */
2433 if (SIBLING_CALL_P (insn))
2434 return;
2436 /* Process non-local goto edges.  */
2437 if (can_nonlocal_goto (insn))
2438 for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
2439 lab;
2440 lab = lab->next ())
2441 maybe_record_trace_start_abnormal (lab->insn (), insn);
2443 else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
/* Delay-slot SEQUENCE: recurse on each member insn.  */
2445 int i, n = seq->len ();
2446 for (i = 0; i < n; ++i)
2447 create_trace_edges (seq->insn (i));
2448 return;
2451 /* Process EH edges.  */
2452 if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2454 eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2455 if (lp)
2456 maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2460 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2462 static void
2463 scan_insn_after (rtx_insn *insn)
2465 if (RTX_FRAME_RELATED_P (insn))
2466 dwarf2out_frame_debug (insn);
2467 notice_args_size (insn);
2470 /* Scan the trace beginning at INSN and create the CFI notes for the
2471 instructions therein.  */
2473 static void
2474 scan_trace (dw_trace_info *trace)
2476 rtx_insn *prev, *insn = trace->head;
2477 dw_cfa_location this_cfa;
2479 if (dump_file)
2480 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2481 trace->id, rtx_name[(int) GET_CODE (insn)],
2482 INSN_UID (insn));
/* The trace's end state starts out equal to its beginning state and is
   updated in place as insns are scanned.  */
2484 trace->end_row = copy_cfi_row (trace->beg_row);
2485 trace->end_true_args_size = trace->beg_true_args_size;
2487 cur_trace = trace;
2488 cur_row = trace->end_row;
2490 this_cfa = cur_row->cfa;
2491 cur_cfa = &this_cfa;
2493 for (prev = insn, insn = NEXT_INSN (insn);
2494 insn;
2495 prev = insn, insn = NEXT_INSN (insn))
2497 rtx_insn *control;
2499 /* Do everything that happens "before" the insn.  */
2500 add_cfi_insn = prev;
2502 /* Notice the end of a trace.  */
2503 if (BARRIER_P (insn))
2505 /* Don't bother saving the unneeded queued registers at all.  */
2506 queued_reg_saves.truncate (0);
2507 break;
2509 if (save_point_p (insn))
2511 /* Propagate across fallthru edges.  */
2512 dwarf2out_flush_queued_reg_saves ();
2513 maybe_record_trace_start (insn, NULL);
2514 break;
2517 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2518 continue;
2520 /* Handle all changes to the row state.  Sequences require special
2521 handling for the positioning of the notes.  */
2522 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2524 rtx_insn *elt;
2525 int i, n = pat->len ();
2527 control = pat->insn (0);
2528 if (can_throw_internal (control))
2529 notice_eh_throw (control);
2530 dwarf2out_flush_queued_reg_saves ();
2532 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2534 /* ??? Hopefully multiple delay slots are not annulled.  */
2535 gcc_assert (n == 2);
2536 gcc_assert (!RTX_FRAME_RELATED_P (control));
2537 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2539 elt = pat->insn (1);
2541 if (INSN_FROM_TARGET_P (elt))
2543 HOST_WIDE_INT restore_args_size;
2544 cfi_vec save_row_reg_save;
2546 /* If ELT is an instruction from target of an annulled
2547 branch, the effects are for the target only and so
2548 the args_size and CFA along the current path
2549 shouldn't change.  */
2550 add_cfi_insn = NULL;
2551 restore_args_size = cur_trace->end_true_args_size;
2552 cur_cfa = &cur_row->cfa;
2553 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2555 scan_insn_after (elt);
2557 /* ??? Should we instead save the entire row state?  */
2558 gcc_assert (!queued_reg_saves.length ());
2560 create_trace_edges (control);
/* Restore the state saved above; ELT's effects were propagated only
   along the branch-taken edge.  */
2562 cur_trace->end_true_args_size = restore_args_size;
2563 cur_row->cfa = this_cfa;
2564 cur_row->reg_save = save_row_reg_save;
2565 cur_cfa = &this_cfa;
2567 else
2569 /* If ELT is a annulled branch-taken instruction (i.e.
2570 executed only when branch is not taken), the args_size
2571 and CFA should not change through the jump.  */
2572 create_trace_edges (control);
2574 /* Update and continue with the trace.  */
2575 add_cfi_insn = insn;
2576 scan_insn_after (elt);
2577 def_cfa_1 (&this_cfa);
2579 continue;
2582 /* The insns in the delay slot should all be considered to happen
2583 "before" a call insn.  Consider a call with a stack pointer
2584 adjustment in the delay slot.  The backtrace from the callee
2585 should include the sp adjustment.  Unfortunately, that leaves
2586 us with an unavoidable unwinding error exactly at the call insn
2587 itself.  For jump insns we'd prefer to avoid this error by
2588 placing the notes after the sequence.  */
2589 if (JUMP_P (control))
2590 add_cfi_insn = insn;
2592 for (i = 1; i < n; ++i)
2594 elt = pat->insn (i);
2595 scan_insn_after (elt);
2598 /* Make sure any register saves are visible at the jump target.  */
2599 dwarf2out_flush_queued_reg_saves ();
2600 any_cfis_emitted = false;
2602 /* However, if there is some adjustment on the call itself, e.g.
2603 a call_pop, that action should be considered to happen after
2604 the call returns.  */
2605 add_cfi_insn = insn;
2606 scan_insn_after (control);
2608 else
2610 /* Flush data before calls and jumps, and of course if necessary.  */
2611 if (can_throw_internal (insn))
2613 notice_eh_throw (insn);
2614 dwarf2out_flush_queued_reg_saves ();
2616 else if (!NONJUMP_INSN_P (insn)
2617 || clobbers_queued_reg_save (insn)
2618 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2619 dwarf2out_flush_queued_reg_saves ();
2620 any_cfis_emitted = false;
2622 add_cfi_insn = insn;
2623 scan_insn_after (insn);
2624 control = insn;
2627 /* Between frame-related-p and args_size we might have otherwise
2628 emitted two cfa adjustments.  Do it now.  */
2629 def_cfa_1 (&this_cfa);
2631 /* Minimize the number of advances by emitting the entire queue
2632 once anything is emitted.  */
2633 if (any_cfis_emitted
2634 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2635 dwarf2out_flush_queued_reg_saves ();
2637 /* Note that a test for control_flow_insn_p does exactly the
2638 same tests as are done to actually create the edges.  So
2639 always call the routine and let it not create edges for
2640 non-control-flow insns.  */
2641 create_trace_edges (control);
/* Clear the scanning globals; they are only meaningful while a trace
   is actively being processed.  */
2644 add_cfi_insn = NULL;
2645 cur_row = NULL;
2646 cur_trace = NULL;
2647 cur_cfa = NULL;
2650 /* Scan the function and create the initial set of CFI notes. */
2652 static void
2653 create_cfi_notes (void)
2655 dw_trace_info *ti;
2657 gcc_checking_assert (!queued_reg_saves.exists ());
2658 gcc_checking_assert (!trace_work_list.exists ());
2660 /* Always begin at the entry trace. */
2661 ti = &trace_info[0];
2662 scan_trace (ti);
2664 while (!trace_work_list.is_empty ())
2666 ti = trace_work_list.pop ();
2667 scan_trace (ti);
2670 queued_reg_saves.release ();
2671 trace_work_list.release ();
2674 /* Return the insn before the first NOTE_INSN_CFI after START. */
2676 static rtx_insn *
2677 before_next_cfi_note (rtx_insn *start)
2679 rtx_insn *prev = start;
2680 while (start)
2682 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2683 return prev;
2684 prev = start;
2685 start = NEXT_INSN (start);
2687 gcc_unreachable ();
2690 /* Insert CFI notes between traces to properly change state between them.  */
2692 static void
2693 connect_traces (void)
2695 unsigned i, n = trace_info.length ();
2696 dw_trace_info *prev_ti, *ti;
2698 /* ??? Ideally, we should have both queued and processed every trace.
2699 However the current representation of constant pools on various targets
2700 is indistinguishable from unreachable code.  Assume for the moment that
2701 we can simply skip over such traces.  */
2702 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2703 these are not "real" instructions, and should not be considered.
2704 This could be generically useful for tablejump data as well.  */
2705 /* Remove all unprocessed traces from the list.  */
2706 for (i = n - 1; i > 0; --i)
2708 ti = &trace_info[i];
2709 if (ti->beg_row == NULL)
2711 trace_info.ordered_remove (i);
2712 n -= 1;
2714 else
2715 gcc_assert (ti->end_row != NULL);
2718 /* Work from the end back to the beginning.  This lets us easily insert
2719 remember/restore_state notes in the correct order wrt other notes.  */
2720 prev_ti = &trace_info[n - 1];
2721 for (i = n - 1; i > 0; --i)
2723 dw_cfi_row *old_row;
2725 ti = prev_ti;
2726 prev_ti = &trace_info[i - 1];
2728 add_cfi_insn = ti->head;
2730 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2731 for the portion of the function in the alternate text
2732 section.  The row state at the very beginning of that
2733 new FDE will be exactly the row state from the CIE.  */
2734 if (ti->switch_sections)
2735 old_row = cie_cfi_row;
2736 else
2738 old_row = prev_ti->end_row;
2739 /* If there's no change from the previous end state, fine.  */
2740 if (cfi_row_equal_p (old_row, ti->beg_row))
2742 /* Otherwise check for the common case of sharing state with
2743 the beginning of an epilogue, but not the end.  Insert
2744 remember/restore opcodes in that case.  */
2745 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2747 dw_cfi_ref cfi;
2749 /* Note that if we blindly insert the remember at the
2750 start of the trace, we can wind up increasing the
2751 size of the unwind info due to extra advance opcodes.
2752 Instead, put the remember immediately before the next
2753 state change.  We know there must be one, because the
2754 state at the beginning and head of the trace differ.  */
2755 add_cfi_insn = before_next_cfi_note (prev_ti->head);
2756 cfi = new_cfi ();
2757 cfi->dw_cfi_opc = DW_CFA_remember_state;
2758 add_cfi (cfi);
2760 add_cfi_insn = ti->head;
2761 cfi = new_cfi ();
2762 cfi->dw_cfi_opc = DW_CFA_restore_state;
2763 add_cfi (cfi);
2765 old_row = prev_ti->beg_row;
2767 /* Otherwise, we'll simply change state from the previous end.  */
/* Emit whatever CFI instructions take OLD_ROW to this trace's
   beginning row state.  */
2770 change_cfi_row (old_row, ti->beg_row);
2772 if (dump_file && add_cfi_insn != ti->head)
2774 rtx_insn *note;
2776 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2777 prev_ti->id, ti->id);
2779 note = ti->head;
2782 note = NEXT_INSN (note);
2783 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2784 output_cfi_directive (dump_file, NOTE_CFI (note));
2786 while (note != add_cfi_insn);
2790 /* Connect args_size between traces that have can_throw_internal insns.  */
2791 if (cfun->eh->lp_array)
2793 HOST_WIDE_INT prev_args_size = 0;
2795 for (i = 0; i < n; ++i)
2797 ti = &trace_info[i];
/* A new text section starts a new FDE, so the running args_size
   resets to zero.  */
2799 if (ti->switch_sections)
2800 prev_args_size = 0;
2801 if (ti->eh_head == NULL)
2802 continue;
2803 gcc_assert (!ti->args_size_undefined);
2805 if (ti->beg_delay_args_size != prev_args_size)
2807 /* ??? Search back to previous CFI note.  */
2808 add_cfi_insn = PREV_INSN (ti->eh_head);
2809 add_cfi_args_size (ti->beg_delay_args_size);
2812 prev_args_size = ti->end_delay_args_size;
2817 /* Set up the pseudo-cfg of instruction traces, as described at the
2818 block comment at the top of the file.  */
2820 static void
2821 create_pseudo_cfg (void)
2823 bool saw_barrier, switch_sections;
2824 dw_trace_info ti;
2825 rtx_insn *insn;
2826 unsigned i;
2828 /* The first trace begins at the start of the function,
2829 and begins with the CIE row state.  */
2830 trace_info.create (16);
2831 memset (&ti, 0, sizeof (ti));
2832 ti.head = get_insns ();
2833 ti.beg_row = cie_cfi_row;
2834 ti.cfa_store = cie_cfi_row->cfa;
2835 ti.cfa_temp.reg = INVALID_REGNUM;
2836 trace_info.quick_push (ti);
2838 if (cie_return_save)
2839 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2841 /* Walk all the insns, collecting start of trace locations.  */
2842 saw_barrier = false;
2843 switch_sections = false;
2844 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2846 if (BARRIER_P (insn))
2847 saw_barrier = true;
2848 else if (NOTE_P (insn)
2849 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2851 /* We should have just seen a barrier.  */
2852 gcc_assert (saw_barrier);
2853 switch_sections = true;
2855 /* Watch out for save_point notes between basic blocks.
2856 In particular, a note after a barrier.  Do not record these,
2857 delaying trace creation until the label.  */
2858 else if (save_point_p (insn)
2859 && (LABEL_P (insn) || !saw_barrier))
/* Start a new trace at this save point; it inherits the pending
   section-switch flag, cleared below.  */
2861 memset (&ti, 0, sizeof (ti));
2862 ti.head = insn;
2863 ti.switch_sections = switch_sections;
2864 ti.id = trace_info.length ();
2865 trace_info.safe_push (ti);
2867 saw_barrier = false;
2868 switch_sections = false;
2872 /* Create the trace index after we've finished building trace_info,
2873 avoiding stale pointer problems due to reallocation.  */
2874 trace_index
2875 = new hash_table<trace_info_hasher> (trace_info.length ());
2876 dw_trace_info *tp;
2877 FOR_EACH_VEC_ELT (trace_info, i, tp)
2879 dw_trace_info **slot;
2881 if (dump_file)
2882 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2883 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2884 tp->switch_sections ? " (section switch)" : "");
2886 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2887 gcc_assert (*slot == NULL);
2888 *slot = tp;
2892 /* Record the initial position of the return address.  RTL is
2893 INCOMING_RETURN_ADDR_RTX.  */
2895 static void
2896 initial_return_save (rtx rtl)
2898 unsigned int reg = INVALID_REGNUM;
2899 HOST_WIDE_INT offset = 0;
2901 switch (GET_CODE (rtl))
2903 case REG:
2904 /* RA is in a register.  */
2905 reg = dwf_regno (rtl);
2906 break;
2908 case MEM:
2909 /* RA is on the stack.  */
/* Decode the address; only SP-relative forms are accepted.  */
2910 rtl = XEXP (rtl, 0);
2911 switch (GET_CODE (rtl))
2913 case REG:
2914 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2915 offset = 0;
2916 break;
2918 case PLUS:
2919 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2920 offset = INTVAL (XEXP (rtl, 1));
2921 break;
2923 case MINUS:
2924 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2925 offset = -INTVAL (XEXP (rtl, 1));
2926 break;
2928 default:
2929 gcc_unreachable ();
2932 break;
2934 case PLUS:
2935 /* The return address is at some offset from any value we can
2936 actually load.  For instance, on the SPARC it is in %i7+8.  Just
2937 ignore the offset for now; it doesn't matter for unwinding frames.  */
2938 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2939 initial_return_save (XEXP (rtl, 0));
2940 return;
2942 default:
2943 gcc_unreachable ();
/* Record the save unless the RA already lives in its return column.  */
2946 if (reg != DWARF_FRAME_RETURN_COLUMN)
2948 if (reg != INVALID_REGNUM)
2949 record_reg_saved_in_reg (rtl, pc_rtx);
2950 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
/* Build the CFI state shared by every FDE (the CIE data): define the
   initial CFA and record the incoming return-address location.  Called
   once, on the first invocation of execute_dwarf2_frame.  */
2954 static void
2955 create_cie_data (void)
2957 dw_cfa_location loc;
2958 dw_trace_info cie_trace;
2960 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2962 memset (&cie_trace, 0, sizeof (cie_trace));
2963 cur_trace = &cie_trace;
2965 add_cfi_vec = &cie_cfi_vec;
2966 cie_cfi_row = cur_row = new_cfi_row ();
2968 /* On entry, the Canonical Frame Address is at SP.  */
2969 memset (&loc, 0, sizeof (loc));
2970 loc.reg = dw_stack_pointer_regnum;
2971 loc.offset = INCOMING_FRAME_SP_OFFSET;
2972 def_cfa_1 (&loc);
2974 if (targetm.debug_unwind_info () == UI_DWARF2
2975 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2977 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2979 /* For a few targets, we have the return address incoming into a
2980 register, but choose a different return column.  This will result
2981 in a DW_CFA_register for the return, and an entry in
2982 regs_saved_in_regs to match.  If the target later stores that
2983 return address register to the stack, we want to be able to emit
2984 the DW_CFA_offset against the return column, not the intermediate
2985 save register.  Save the contents of regs_saved_in_regs so that
2986 we can re-initialize it at the start of each function.  */
2987 switch (cie_trace.regs_saved_in_regs.length ())
2989 case 0:
2990 break;
2991 case 1:
2992 cie_return_save = ggc_alloc<reg_saved_in_data> ();
2993 *cie_return_save = cie_trace.regs_saved_in_regs[0];
2994 cie_trace.regs_saved_in_regs.release ();
2995 break;
2996 default:
2997 gcc_unreachable ();
/* Reset the globals used while building the CIE row.  */
3001 add_cfi_vec = NULL;
3002 cur_row = NULL;
3003 cur_trace = NULL;
3006 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3007 state at each location within the function.  These notes will be
3008 emitted during pass_final.  */
3010 static unsigned int
3011 execute_dwarf2_frame (void)
3013 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
3014 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3016 /* The first time we're called, compute the incoming frame state.  */
3017 if (cie_cfi_vec == NULL)
3018 create_cie_data ();
3020 dwarf2out_alloc_current_fde ();
3022 create_pseudo_cfg ();
3024 /* Do the work.  */
3025 create_cfi_notes ();
3026 connect_traces ();
3027 add_cfis_to_fde ();
3029 /* Free all the data we allocated.  */
3031 size_t i;
3032 dw_trace_info *ti;
3034 FOR_EACH_VEC_ELT (trace_info, i, ti)
3035 ti->regs_saved_in_regs.release ();
3037 trace_info.release ();
3039 delete trace_index;
3040 trace_index = NULL;
/* Pass return value; 0 requests no extra TODO work.  */
3042 return 0;
3045 /* Convert a DWARF call frame info. operation to its string name */
3047 static const char *
3048 dwarf_cfi_name (unsigned int cfi_opc)
3050 const char *name = get_DW_CFA_name (cfi_opc);
3052 if (name != NULL)
3053 return name;
3055 return "DW_CFA_<unknown>";
3058 /* This routine will generate the correct assembly data for a location
3059 description based on a cfi entry with a complex address.  */
3061 static void
3062 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3064 dw_loc_descr_ref loc;
3065 unsigned long size;
3067 if (cfi->dw_cfi_opc == DW_CFA_expression
3068 || cfi->dw_cfi_opc == DW_CFA_val_expression)
/* Register-specific expression forms carry the target register in
   oprnd1 and the location expression in oprnd2.  */
3070 unsigned r =
3071 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3072 dw2_asm_output_data (1, r, NULL);
3073 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3075 else
3076 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3078 /* Output the size of the block.  */
3079 size = size_of_locs (loc);
3080 dw2_asm_output_data_uleb128 (size, NULL);
3082 /* Now output the operations themselves.  */
3083 output_loc_sequence (loc, for_eh);
3086 /* Similar, but used for .cfi_escape.  */
/* Emits the same data as output_cfa_loc, but as comma-separated raw
   bytes on asm_out_file suitable for a .cfi_escape directive.  */
3088 static void
3089 output_cfa_loc_raw (dw_cfi_ref cfi)
3091 dw_loc_descr_ref loc;
3092 unsigned long size;
3094 if (cfi->dw_cfi_opc == DW_CFA_expression
3095 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3097 unsigned r =
3098 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3099 fprintf (asm_out_file, "%#x,", r);
3100 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3102 else
3103 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3105 /* Output the size of the block.  */
3106 size = size_of_locs (loc);
3107 dw2_asm_output_data_uleb128_raw (size);
3108 fputc (',', asm_out_file);
3110 /* Now output the operations themselves.  */
3111 output_loc_sequence_raw (loc);
3114 /* Output a Call Frame Information opcode and its operand(s).  */
/* FDE tracks the current label so the advance opcodes can emit deltas;
   FOR_EH selects EH-frame vs debug-frame register numbering.  */
3116 void
3117 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3119 unsigned long r;
3120 HOST_WIDE_INT off;
/* The three opcodes that pack an operand into the low 6 bits of the
   opcode byte are handled first; all other opcodes emit the opcode
   byte followed by separate operands.  */
3122 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3123 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3124 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3125 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3126 ((unsigned HOST_WIDE_INT)
3127 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3128 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3130 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3131 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3132 "DW_CFA_offset, column %#lx", r);
3133 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3134 dw2_asm_output_data_uleb128 (off, NULL);
3136 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3138 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3139 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3140 "DW_CFA_restore, column %#lx", r);
3142 else
3144 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3145 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3147 switch (cfi->dw_cfi_opc)
3149 case DW_CFA_set_loc:
3150 if (for_eh)
3151 dw2_asm_output_encoded_addr_rtx (
3152 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3153 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3154 false, NULL);
3155 else
3156 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3157 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3158 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3159 break;
3161 case DW_CFA_advance_loc1:
3162 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3163 fde->dw_fde_current_label, NULL);
3164 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3165 break;
3167 case DW_CFA_advance_loc2:
3168 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3169 fde->dw_fde_current_label, NULL);
3170 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3171 break;
3173 case DW_CFA_advance_loc4:
3174 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3175 fde->dw_fde_current_label, NULL);
3176 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3177 break;
3179 case DW_CFA_MIPS_advance_loc8:
3180 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3181 fde->dw_fde_current_label, NULL);
3182 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3183 break;
3185 case DW_CFA_offset_extended:
3186 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3187 dw2_asm_output_data_uleb128 (r, NULL);
3188 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3189 dw2_asm_output_data_uleb128 (off, NULL);
3190 break;
3192 case DW_CFA_def_cfa:
3193 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3194 dw2_asm_output_data_uleb128 (r, NULL);
3195 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3196 break;
3198 case DW_CFA_offset_extended_sf:
3199 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3200 dw2_asm_output_data_uleb128 (r, NULL);
3201 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3202 dw2_asm_output_data_sleb128 (off, NULL);
3203 break;
3205 case DW_CFA_def_cfa_sf:
3206 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3207 dw2_asm_output_data_uleb128 (r, NULL);
3208 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3209 dw2_asm_output_data_sleb128 (off, NULL);
3210 break;
3212 case DW_CFA_restore_extended:
3213 case DW_CFA_undefined:
3214 case DW_CFA_same_value:
3215 case DW_CFA_def_cfa_register:
3216 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3217 dw2_asm_output_data_uleb128 (r, NULL);
3218 break;
3220 case DW_CFA_register:
3221 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3222 dw2_asm_output_data_uleb128 (r, NULL);
3223 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3224 dw2_asm_output_data_uleb128 (r, NULL);
3225 break;
3227 case DW_CFA_def_cfa_offset:
3228 case DW_CFA_GNU_args_size:
3229 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3230 break;
3232 case DW_CFA_def_cfa_offset_sf:
3233 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3234 dw2_asm_output_data_sleb128 (off, NULL);
3235 break;
3237 case DW_CFA_GNU_window_save:
3238 break;
3240 case DW_CFA_def_cfa_expression:
3241 case DW_CFA_expression:
3242 case DW_CFA_val_expression:
3243 output_cfa_loc (cfi, for_eh);
3244 break;
3246 case DW_CFA_GNU_negative_offset_extended:
3247 /* Obsoleted by DW_CFA_offset_extended_sf.  */
3248 gcc_unreachable ();
3250 default:
3251 break;
3256 /* Similar, but do it via assembler directives instead.  */
/* When F is asm_out_file, real .cfi_* directives (or .cfi_escape byte
   sequences) are emitted; any other FILE gets a human-readable
   rendering for debug dumps.  */
3258 void
3259 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3261 unsigned long r, r2;
3263 switch (cfi->dw_cfi_opc)
3265 case DW_CFA_advance_loc:
3266 case DW_CFA_advance_loc1:
3267 case DW_CFA_advance_loc2:
3268 case DW_CFA_advance_loc4:
3269 case DW_CFA_MIPS_advance_loc8:
3270 case DW_CFA_set_loc:
3271 /* Should only be created in a code path not followed when emitting
3272 via directives.  The assembler is going to take care of this for
3273 us.  But this routines is also used for debugging dumps, so
3274 print something.  */
3275 gcc_assert (f != asm_out_file);
3276 fprintf (f, "\t.cfi_advance_loc\n");
3277 break;
3279 case DW_CFA_offset:
3280 case DW_CFA_offset_extended:
3281 case DW_CFA_offset_extended_sf:
3282 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3283 fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3284 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3285 break;
3287 case DW_CFA_restore:
3288 case DW_CFA_restore_extended:
3289 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3290 fprintf (f, "\t.cfi_restore %lu\n", r);
3291 break;
3293 case DW_CFA_undefined:
3294 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3295 fprintf (f, "\t.cfi_undefined %lu\n", r);
3296 break;
3298 case DW_CFA_same_value:
3299 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3300 fprintf (f, "\t.cfi_same_value %lu\n", r);
3301 break;
3303 case DW_CFA_def_cfa:
3304 case DW_CFA_def_cfa_sf:
3305 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3306 fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3307 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3308 break;
3310 case DW_CFA_def_cfa_register:
3311 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3312 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3313 break;
3315 case DW_CFA_register:
3316 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3317 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3318 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3319 break;
3321 case DW_CFA_def_cfa_offset:
3322 case DW_CFA_def_cfa_offset_sf:
3323 fprintf (f, "\t.cfi_def_cfa_offset "
3324 HOST_WIDE_INT_PRINT_DEC"\n",
3325 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3326 break;
3328 case DW_CFA_remember_state:
3329 fprintf (f, "\t.cfi_remember_state\n");
3330 break;
3331 case DW_CFA_restore_state:
3332 fprintf (f, "\t.cfi_restore_state\n");
3333 break;
3335 case DW_CFA_GNU_args_size:
/* No assembler directive exists for this opcode, so escape the raw
   opcode and uleb128 operand when emitting real assembly.  */
3336 if (f == asm_out_file)
3338 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3339 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3340 if (flag_debug_asm)
3341 fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
3342 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3343 fputc ('\n', f);
3345 else
3347 fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
3348 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3350 break;
3352 case DW_CFA_GNU_window_save:
3353 fprintf (f, "\t.cfi_window_save\n");
3354 break;
3356 case DW_CFA_def_cfa_expression:
3357 case DW_CFA_expression:
3358 case DW_CFA_val_expression:
3359 if (f != asm_out_file)
3361 fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
3362 cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
3363 cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
3364 break;
3366 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3367 output_cfa_loc_raw (cfi);
3368 fputc ('\n', f);
3369 break;
3371 default:
3372 gcc_unreachable ();
3376 void
3377 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3379 if (dwarf2out_do_cfi_asm ())
3380 output_cfi_directive (asm_out_file, cfi);
/* Dump ROW's state to F rendered as CFI directives, for debugging.  */
3383 static void
3384 dump_cfi_row (FILE *f, dw_cfi_row *row)
3386 dw_cfi_ref cfi;
3387 unsigned i;
3389 cfi = row->cfa_cfi;
3390 if (!cfi)
/* No cached CFA instruction in the row: synthesize one from the
   row's CFA location so it can be printed.  */
3392 dw_cfa_location dummy;
3393 memset (&dummy, 0, sizeof (dummy));
3394 dummy.reg = INVALID_REGNUM;
3395 cfi = def_cfa_0 (&dummy, &row->cfa);
3397 output_cfi_directive (f, cfi);
3399 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3400 if (cfi)
3401 output_cfi_directive (f, cfi);
/* Entry point for calling from the debugger: dump ROW to stderr.  */
3404 void debug_cfi_row (dw_cfi_row *row);
3406 void
3407 debug_cfi_row (dw_cfi_row *row)
3409 dump_cfi_row (stderr, row);
3413 /* Save the result of dwarf2out_do_frame across PCH.
3414 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3415 static GTY(()) signed char saved_do_cfi_asm = 0;
3417 /* Decide whether we want to emit frame unwind information for the current
3418 translation unit. */
3420 bool
3421 dwarf2out_do_frame (void)
3423 /* We want to emit correct CFA location expressions or lists, so we
3424 have to return true if we're going to output debug info, even if
3425 we're not going to output frame or unwind info. */
3426 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3427 return true;
3429 if (saved_do_cfi_asm > 0)
3430 return true;
3432 if (targetm.debug_unwind_info () == UI_DWARF2)
3433 return true;
3435 if ((flag_unwind_tables || flag_exceptions)
3436 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3437 return true;
3439 return false;
3442 /* Decide whether to emit frame unwind via assembler directives. */
3444 bool
3445 dwarf2out_do_cfi_asm (void)
3447 int enc;
3449 if (saved_do_cfi_asm != 0)
3450 return saved_do_cfi_asm > 0;
3452 /* Assume failure for a moment. */
3453 saved_do_cfi_asm = -1;
3455 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3456 return false;
3457 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3458 return false;
3460 /* Make sure the personality encoding is one the assembler can support.
3461 In particular, aligned addresses can't be handled. */
3462 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3463 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3464 return false;
3465 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3466 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3467 return false;
3469 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3470 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3471 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3472 && !flag_unwind_tables && !flag_exceptions
3473 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3474 return false;
3476 /* Success! */
3477 saved_do_cfi_asm = 1;
3478 return true;
3481 namespace {
3483 const pass_data pass_data_dwarf2_frame =
3485 RTL_PASS, /* type */
3486 "dwarf2", /* name */
3487 OPTGROUP_NONE, /* optinfo_flags */
3488 TV_FINAL, /* tv_id */
3489 0, /* properties_required */
3490 0, /* properties_provided */
3491 0, /* properties_destroyed */
3492 0, /* todo_flags_start */
3493 0, /* todo_flags_finish */
3496 class pass_dwarf2_frame : public rtl_opt_pass
3498 public:
3499 pass_dwarf2_frame (gcc::context *ctxt)
3500 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3503 /* opt_pass methods: */
3504 virtual bool gate (function *);
3505 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3507 }; // class pass_dwarf2_frame
3509 bool
3510 pass_dwarf2_frame::gate (function *)
3512 /* Targets which still implement the prologue in assembler text
3513 cannot use the generic dwarf2 unwinding. */
3514 if (!targetm.have_prologue ())
3515 return false;
3517 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3518 from the optimized shrink-wrapping annotations that we will compute.
3519 For now, only produce the CFI notes for dwarf2. */
3520 return dwarf2out_do_frame ();
3523 } // anon namespace
3525 rtl_opt_pass *
3526 make_pass_dwarf2_frame (gcc::context *ctxt)
3528 return new pass_dwarf2_frame (ctxt);
3531 #include "gt-dwarf2cfi.h"