pa.c (pa_som_asm_init_sections): Fix comment.
[official-gcc.git] / gcc / dwarf2cfi.c
blob62117e7c8505959b3665394c987437df7cadc434
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
54 #endif
56 /* A collected description of an entire row of the abstract CFI table. */
57 struct GTY(()) dw_cfi_row
59 /* The expression that computes the CFA, expressed in two different ways.
60 The CFA member for the simple cases, and the full CFI expression for
61 the complex cases. The later will be a DW_CFA_cfa_expression. */
62 dw_cfa_location cfa;
63 dw_cfi_ref cfa_cfi;
65 /* The expressions for any register column that is saved. */
66 cfi_vec reg_save;
69 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
70 struct GTY(()) reg_saved_in_data {
71 rtx orig_reg;
72 rtx saved_in_reg;
76 /* Since we no longer have a proper CFG, we're going to create a facsimile
77 of one on the fly while processing the frame-related insns.
79 We create dw_trace_info structures for each extended basic block beginning
80 and ending at a "save point". Save points are labels, barriers, certain
81 notes, and of course the beginning and end of the function.
83 As we encounter control transfer insns, we propagate the "current"
84 row state across the edges to the starts of traces. When checking is
85 enabled, we validate that we propagate the same data from all sources.
87 All traces are members of the TRACE_INFO array, in the order in which
88 they appear in the instruction stream.
90 All save points are present in the TRACE_INDEX hash, mapping the insn
91 starting a trace to the dw_trace_info describing the trace. */
93 struct dw_trace_info
95 /* The insn that begins the trace. */
96 rtx_insn *head;
98 /* The row state at the beginning and end of the trace. */
99 dw_cfi_row *beg_row, *end_row;
101 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
102 while scanning insns. However, the args_size value is irrelevant at
103 any point except can_throw_internal_p insns. Therefore the "delay"
104 sizes the values that must actually be emitted for this trace. */
105 HOST_WIDE_INT beg_true_args_size, end_true_args_size;
106 HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;
108 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
109 rtx_insn *eh_head;
111 /* The following variables contain data used in interpreting frame related
112 expressions. These are not part of the "real" row state as defined by
113 Dwarf, but it seems like they need to be propagated into a trace in case
114 frame related expressions have been sunk. */
115 /* ??? This seems fragile. These variables are fragments of a larger
116 expression. If we do not keep the entire expression together, we risk
117 not being able to put it together properly. Consider forcing targets
118 to generate self-contained expressions and dropping all of the magic
119 interpretation code in this file. Or at least refusing to shrink wrap
120 any frame related insn that doesn't contain a complete expression. */
122 /* The register used for saving registers to the stack, and its offset
123 from the CFA. */
124 dw_cfa_location cfa_store;
126 /* A temporary register holding an integral value used in adjusting SP
127 or setting up the store_reg. The "offset" field holds the integer
128 value, not an offset. */
129 dw_cfa_location cfa_temp;
131 /* A set of registers saved in other registers. This is the inverse of
132 the row->reg_save info, if the entry is a DW_CFA_register. This is
133 implemented as a flat array because it normally contains zero or 1
134 entry, depending on the target. IA-64 is the big spender here, using
135 a maximum of 5 entries. */
136 vec<reg_saved_in_data> regs_saved_in_regs;
138 /* An identifier for this trace. Used only for debugging dumps. */
139 unsigned id;
141 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
142 bool switch_sections;
144 /* True if we've seen different values incoming to beg_true_args_size. */
145 bool args_size_undefined;
149 /* Hashtable helpers. */
151 struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
153 static inline hashval_t hash (const dw_trace_info *);
154 static inline bool equal (const dw_trace_info *, const dw_trace_info *);
157 inline hashval_t
158 trace_info_hasher::hash (const dw_trace_info *ti)
160 return INSN_UID (ti->head);
163 inline bool
164 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
166 return a->head == b->head;
170 /* The variables making up the pseudo-cfg, as described above. */
171 static vec<dw_trace_info> trace_info;
172 static vec<dw_trace_info *> trace_work_list;
173 static hash_table<trace_info_hasher> *trace_index;
175 /* A vector of call frame insns for the CIE. */
176 cfi_vec cie_cfi_vec;
178 /* The state of the first row of the FDE table, which includes the
179 state provided by the CIE. */
180 static GTY(()) dw_cfi_row *cie_cfi_row;
182 static GTY(()) reg_saved_in_data *cie_return_save;
184 static GTY(()) unsigned long dwarf2out_cfi_label_num;
186 /* The insn after which a new CFI note should be emitted. */
187 static rtx_insn *add_cfi_insn;
189 /* When non-null, add_cfi will add the CFI to this vector. */
190 static cfi_vec *add_cfi_vec;
192 /* The current instruction trace. */
193 static dw_trace_info *cur_trace;
195 /* The current, i.e. most recently generated, row of the CFI table. */
196 static dw_cfi_row *cur_row;
198 /* A copy of the current CFA, for use during the processing of a
199 single insn. */
200 static dw_cfa_location *cur_cfa;
202 /* We delay emitting a register save until either (a) we reach the end
203 of the prologue or (b) the register is clobbered. This clusters
204 register saves so that there are fewer pc advances. */
206 struct queued_reg_save {
207 rtx reg;
208 rtx saved_reg;
209 HOST_WIDE_INT cfa_offset;
213 static vec<queued_reg_save> queued_reg_saves;
215 /* True if any CFI directives were emitted at the current insn. */
216 static bool any_cfis_emitted;
218 /* Short-hand for commonly used register numbers. */
219 static unsigned dw_stack_pointer_regnum;
220 static unsigned dw_frame_pointer_regnum;
222 /* Hook used by __throw. */
225 expand_builtin_dwarf_sp_column (void)
227 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
228 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
231 /* MEM is a memory reference for the register size table, each element of
232 which has mode MODE. Initialize column C as a return address column. */
234 static void
235 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
237 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
238 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
239 emit_move_insn (adjust_address (mem, mode, offset),
240 gen_int_mode (size, mode));
243 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
244 init_one_dwarf_reg_size to communicate on what has been done by the
245 latter. */
247 struct init_one_dwarf_reg_state
249 /* Whether the dwarf return column was initialized. */
250 bool wrote_return_column;
252 /* For each hard register REGNO, whether init_one_dwarf_reg_size
253 was given REGNO to process already. */
254 bool processed_regno [FIRST_PSEUDO_REGISTER];
258 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
259 initialize the dwarf register size table entry corresponding to register
260 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
261 use for the size entry to initialize, and INIT_STATE is the communication
262 datastructure conveying what we're doing to our caller. */
264 static
265 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
266 rtx table, machine_mode slotmode,
267 init_one_dwarf_reg_state *init_state)
269 const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
270 const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
271 const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
273 const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
274 const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);
276 init_state->processed_regno[regno] = true;
278 if (rnum >= DWARF_FRAME_REGISTERS)
279 return;
281 if (dnum == DWARF_FRAME_RETURN_COLUMN)
283 if (regmode == VOIDmode)
284 return;
285 init_state->wrote_return_column = true;
288 if (slotoffset < 0)
289 return;
291 emit_move_insn (adjust_address (table, slotmode, slotoffset),
292 gen_int_mode (regsize, slotmode));
295 /* Generate code to initialize the dwarf register size table located
296 at the provided ADDRESS. */
298 void
299 expand_builtin_init_dwarf_reg_sizes (tree address)
301 unsigned int i;
302 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
303 rtx addr = expand_normal (address);
304 rtx mem = gen_rtx_MEM (BLKmode, addr);
306 init_one_dwarf_reg_state init_state;
308 memset ((char *)&init_state, 0, sizeof (init_state));
310 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
312 machine_mode save_mode;
313 rtx span;
315 /* No point in processing a register multiple times. This could happen
316 with register spans, e.g. when a reg is first processed as a piece of
317 a span, then as a register on its own later on. */
319 if (init_state.processed_regno[i])
320 continue;
322 save_mode = targetm.dwarf_frame_reg_mode (i);
323 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
325 if (!span)
326 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
327 else
329 for (int si = 0; si < XVECLEN (span, 0); si++)
331 rtx reg = XVECEXP (span, 0, si);
333 init_one_dwarf_reg_size
334 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
339 if (!init_state.wrote_return_column)
340 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
342 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
343 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
344 #endif
346 targetm.init_dwarf_reg_sizes_extra (address);
350 static dw_trace_info *
351 get_trace_info (rtx_insn *insn)
353 dw_trace_info dummy;
354 dummy.head = insn;
355 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
358 static bool
359 save_point_p (rtx_insn *insn)
361 /* Labels, except those that are really jump tables. */
362 if (LABEL_P (insn))
363 return inside_basic_block_p (insn);
365 /* We split traces at the prologue/epilogue notes because those
366 are points at which the unwind info is usually stable. This
367 makes it easier to find spots with identical unwind info so
368 that we can use remember/restore_state opcodes. */
369 if (NOTE_P (insn))
370 switch (NOTE_KIND (insn))
372 case NOTE_INSN_PROLOGUE_END:
373 case NOTE_INSN_EPILOGUE_BEG:
374 return true;
377 return false;
380 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
382 static inline HOST_WIDE_INT
383 div_data_align (HOST_WIDE_INT off)
385 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
386 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
387 return r;
390 /* Return true if we need a signed version of a given opcode
391 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
393 static inline bool
394 need_data_align_sf_opcode (HOST_WIDE_INT off)
396 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
399 /* Return a pointer to a newly allocated Call Frame Instruction. */
401 static inline dw_cfi_ref
402 new_cfi (void)
404 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
406 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
407 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
409 return cfi;
412 /* Return a newly allocated CFI row, with no defined data. */
414 static dw_cfi_row *
415 new_cfi_row (void)
417 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
419 row->cfa.reg = INVALID_REGNUM;
421 return row;
424 /* Return a copy of an existing CFI row. */
426 static dw_cfi_row *
427 copy_cfi_row (dw_cfi_row *src)
429 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
431 *dst = *src;
432 dst->reg_save = vec_safe_copy (src->reg_save);
434 return dst;
437 /* Generate a new label for the CFI info to refer to. */
439 static char *
440 dwarf2out_cfi_label (void)
442 int num = dwarf2out_cfi_label_num++;
443 char label[20];
445 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
447 return xstrdup (label);
450 /* Add CFI either to the current insn stream or to a vector, or both. */
452 static void
453 add_cfi (dw_cfi_ref cfi)
455 any_cfis_emitted = true;
457 if (add_cfi_insn != NULL)
459 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
460 NOTE_CFI (add_cfi_insn) = cfi;
463 if (add_cfi_vec != NULL)
464 vec_safe_push (*add_cfi_vec, cfi);
467 static void
468 add_cfi_args_size (HOST_WIDE_INT size)
470 dw_cfi_ref cfi = new_cfi ();
472 /* While we can occasionally have args_size < 0 internally, this state
473 should not persist at a point we actually need an opcode. */
474 gcc_assert (size >= 0);
476 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
477 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
479 add_cfi (cfi);
482 static void
483 add_cfi_restore (unsigned reg)
485 dw_cfi_ref cfi = new_cfi ();
487 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
488 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
490 add_cfi (cfi);
493 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
494 that the register column is no longer saved. */
496 static void
497 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
499 if (vec_safe_length (row->reg_save) <= column)
500 vec_safe_grow_cleared (row->reg_save, column + 1);
501 (*row->reg_save)[column] = cfi;
504 /* This function fills in aa dw_cfa_location structure from a dwarf location
505 descriptor sequence. */
507 static void
508 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
510 struct dw_loc_descr_node *ptr;
511 cfa->offset = 0;
512 cfa->base_offset = 0;
513 cfa->indirect = 0;
514 cfa->reg = -1;
516 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
518 enum dwarf_location_atom op = ptr->dw_loc_opc;
520 switch (op)
522 case DW_OP_reg0:
523 case DW_OP_reg1:
524 case DW_OP_reg2:
525 case DW_OP_reg3:
526 case DW_OP_reg4:
527 case DW_OP_reg5:
528 case DW_OP_reg6:
529 case DW_OP_reg7:
530 case DW_OP_reg8:
531 case DW_OP_reg9:
532 case DW_OP_reg10:
533 case DW_OP_reg11:
534 case DW_OP_reg12:
535 case DW_OP_reg13:
536 case DW_OP_reg14:
537 case DW_OP_reg15:
538 case DW_OP_reg16:
539 case DW_OP_reg17:
540 case DW_OP_reg18:
541 case DW_OP_reg19:
542 case DW_OP_reg20:
543 case DW_OP_reg21:
544 case DW_OP_reg22:
545 case DW_OP_reg23:
546 case DW_OP_reg24:
547 case DW_OP_reg25:
548 case DW_OP_reg26:
549 case DW_OP_reg27:
550 case DW_OP_reg28:
551 case DW_OP_reg29:
552 case DW_OP_reg30:
553 case DW_OP_reg31:
554 cfa->reg = op - DW_OP_reg0;
555 break;
556 case DW_OP_regx:
557 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
558 break;
559 case DW_OP_breg0:
560 case DW_OP_breg1:
561 case DW_OP_breg2:
562 case DW_OP_breg3:
563 case DW_OP_breg4:
564 case DW_OP_breg5:
565 case DW_OP_breg6:
566 case DW_OP_breg7:
567 case DW_OP_breg8:
568 case DW_OP_breg9:
569 case DW_OP_breg10:
570 case DW_OP_breg11:
571 case DW_OP_breg12:
572 case DW_OP_breg13:
573 case DW_OP_breg14:
574 case DW_OP_breg15:
575 case DW_OP_breg16:
576 case DW_OP_breg17:
577 case DW_OP_breg18:
578 case DW_OP_breg19:
579 case DW_OP_breg20:
580 case DW_OP_breg21:
581 case DW_OP_breg22:
582 case DW_OP_breg23:
583 case DW_OP_breg24:
584 case DW_OP_breg25:
585 case DW_OP_breg26:
586 case DW_OP_breg27:
587 case DW_OP_breg28:
588 case DW_OP_breg29:
589 case DW_OP_breg30:
590 case DW_OP_breg31:
591 cfa->reg = op - DW_OP_breg0;
592 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
593 break;
594 case DW_OP_bregx:
595 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
596 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
597 break;
598 case DW_OP_deref:
599 cfa->indirect = 1;
600 break;
601 case DW_OP_plus_uconst:
602 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
603 break;
604 default:
605 gcc_unreachable ();
610 /* Find the previous value for the CFA, iteratively. CFI is the opcode
611 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
612 one level of remember/restore state processing. */
614 void
615 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
617 switch (cfi->dw_cfi_opc)
619 case DW_CFA_def_cfa_offset:
620 case DW_CFA_def_cfa_offset_sf:
621 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
622 break;
623 case DW_CFA_def_cfa_register:
624 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
625 break;
626 case DW_CFA_def_cfa:
627 case DW_CFA_def_cfa_sf:
628 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
629 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
630 break;
631 case DW_CFA_def_cfa_expression:
632 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
633 break;
635 case DW_CFA_remember_state:
636 gcc_assert (!remember->in_use);
637 *remember = *loc;
638 remember->in_use = 1;
639 break;
640 case DW_CFA_restore_state:
641 gcc_assert (remember->in_use);
642 *loc = *remember;
643 remember->in_use = 0;
644 break;
646 default:
647 break;
651 /* Determine if two dw_cfa_location structures define the same data. */
653 bool
654 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
656 return (loc1->reg == loc2->reg
657 && loc1->offset == loc2->offset
658 && loc1->indirect == loc2->indirect
659 && (loc1->indirect == 0
660 || loc1->base_offset == loc2->base_offset));
663 /* Determine if two CFI operands are identical. */
665 static bool
666 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
668 switch (t)
670 case dw_cfi_oprnd_unused:
671 return true;
672 case dw_cfi_oprnd_reg_num:
673 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
674 case dw_cfi_oprnd_offset:
675 return a->dw_cfi_offset == b->dw_cfi_offset;
676 case dw_cfi_oprnd_addr:
677 return (a->dw_cfi_addr == b->dw_cfi_addr
678 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
679 case dw_cfi_oprnd_loc:
680 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
682 gcc_unreachable ();
685 /* Determine if two CFI entries are identical. */
687 static bool
688 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
690 enum dwarf_call_frame_info opc;
692 /* Make things easier for our callers, including missing operands. */
693 if (a == b)
694 return true;
695 if (a == NULL || b == NULL)
696 return false;
698 /* Obviously, the opcodes must match. */
699 opc = a->dw_cfi_opc;
700 if (opc != b->dw_cfi_opc)
701 return false;
703 /* Compare the two operands, re-using the type of the operands as
704 already exposed elsewhere. */
705 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
706 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
707 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
708 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
711 /* Determine if two CFI_ROW structures are identical. */
713 static bool
714 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
716 size_t i, n_a, n_b, n_max;
718 if (a->cfa_cfi)
720 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
721 return false;
723 else if (!cfa_equal_p (&a->cfa, &b->cfa))
724 return false;
726 n_a = vec_safe_length (a->reg_save);
727 n_b = vec_safe_length (b->reg_save);
728 n_max = MAX (n_a, n_b);
730 for (i = 0; i < n_max; ++i)
732 dw_cfi_ref r_a = NULL, r_b = NULL;
734 if (i < n_a)
735 r_a = (*a->reg_save)[i];
736 if (i < n_b)
737 r_b = (*b->reg_save)[i];
739 if (!cfi_equal_p (r_a, r_b))
740 return false;
743 return true;
746 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
747 what opcode to emit. Returns the CFI opcode to effect the change, or
748 NULL if NEW_CFA == OLD_CFA. */
750 static dw_cfi_ref
751 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
753 dw_cfi_ref cfi;
755 /* If nothing changed, no need to issue any call frame instructions. */
756 if (cfa_equal_p (old_cfa, new_cfa))
757 return NULL;
759 cfi = new_cfi ();
761 if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
763 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
764 the CFA register did not change but the offset did. The data
765 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
766 in the assembler via the .cfi_def_cfa_offset directive. */
767 if (new_cfa->offset < 0)
768 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
769 else
770 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
771 cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
773 else if (new_cfa->offset == old_cfa->offset
774 && old_cfa->reg != INVALID_REGNUM
775 && !new_cfa->indirect
776 && !old_cfa->indirect)
778 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
779 indicating the CFA register has changed to <register> but the
780 offset has not changed. */
781 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
782 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
784 else if (new_cfa->indirect == 0)
786 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
787 indicating the CFA register has changed to <register> with
788 the specified offset. The data factoring for DW_CFA_def_cfa_sf
789 happens in output_cfi, or in the assembler via the .cfi_def_cfa
790 directive. */
791 if (new_cfa->offset < 0)
792 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
793 else
794 cfi->dw_cfi_opc = DW_CFA_def_cfa;
795 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
796 cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
798 else
800 /* Construct a DW_CFA_def_cfa_expression instruction to
801 calculate the CFA using a full location expression since no
802 register-offset pair is available. */
803 struct dw_loc_descr_node *loc_list;
805 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
806 loc_list = build_cfa_loc (new_cfa, 0);
807 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
810 return cfi;
813 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
815 static void
816 def_cfa_1 (dw_cfa_location *new_cfa)
818 dw_cfi_ref cfi;
820 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
821 cur_trace->cfa_store.offset = new_cfa->offset;
823 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
824 if (cfi)
826 cur_row->cfa = *new_cfa;
827 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
828 ? cfi : NULL);
830 add_cfi (cfi);
834 /* Add the CFI for saving a register. REG is the CFA column number.
835 If SREG is -1, the register is saved at OFFSET from the CFA;
836 otherwise it is saved in SREG. */
838 static void
839 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
841 dw_fde_ref fde = cfun ? cfun->fde : NULL;
842 dw_cfi_ref cfi = new_cfi ();
844 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
846 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
847 if (fde
848 && fde->stack_realign
849 && sreg == INVALID_REGNUM)
851 cfi->dw_cfi_opc = DW_CFA_expression;
852 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
853 cfi->dw_cfi_oprnd2.dw_cfi_loc
854 = build_cfa_aligned_loc (&cur_row->cfa, offset,
855 fde->stack_realignment);
857 else if (sreg == INVALID_REGNUM)
859 if (need_data_align_sf_opcode (offset))
860 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
861 else if (reg & ~0x3f)
862 cfi->dw_cfi_opc = DW_CFA_offset_extended;
863 else
864 cfi->dw_cfi_opc = DW_CFA_offset;
865 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
867 else if (sreg == reg)
869 /* While we could emit something like DW_CFA_same_value or
870 DW_CFA_restore, we never expect to see something like that
871 in a prologue. This is more likely to be a bug. A backend
872 can always bypass this by using REG_CFA_RESTORE directly. */
873 gcc_unreachable ();
875 else
877 cfi->dw_cfi_opc = DW_CFA_register;
878 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
881 add_cfi (cfi);
882 update_row_reg_save (cur_row, reg, cfi);
885 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
886 and adjust data structures to match. */
888 static void
889 notice_args_size (rtx_insn *insn)
891 HOST_WIDE_INT args_size, delta;
892 rtx note;
894 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
895 if (note == NULL)
896 return;
898 args_size = INTVAL (XEXP (note, 0));
899 delta = args_size - cur_trace->end_true_args_size;
900 if (delta == 0)
901 return;
903 cur_trace->end_true_args_size = args_size;
905 /* If the CFA is computed off the stack pointer, then we must adjust
906 the computation of the CFA as well. */
907 if (cur_cfa->reg == dw_stack_pointer_regnum)
909 gcc_assert (!cur_cfa->indirect);
911 /* Convert a change in args_size (always a positive in the
912 direction of stack growth) to a change in stack pointer. */
913 if (!STACK_GROWS_DOWNWARD)
914 delta = -delta;
916 cur_cfa->offset += delta;
920 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
921 data within the trace related to EH insns and args_size. */
923 static void
924 notice_eh_throw (rtx_insn *insn)
926 HOST_WIDE_INT args_size;
928 args_size = cur_trace->end_true_args_size;
929 if (cur_trace->eh_head == NULL)
931 cur_trace->eh_head = insn;
932 cur_trace->beg_delay_args_size = args_size;
933 cur_trace->end_delay_args_size = args_size;
935 else if (cur_trace->end_delay_args_size != args_size)
937 cur_trace->end_delay_args_size = args_size;
939 /* ??? If the CFA is the stack pointer, search backward for the last
940 CFI note and insert there. Given that the stack changed for the
941 args_size change, there *must* be such a note in between here and
942 the last eh insn. */
943 add_cfi_args_size (args_size);
947 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
948 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
949 used in places where rtl is prohibited. */
951 static inline unsigned
952 dwf_regno (const_rtx reg)
954 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
955 return DWARF_FRAME_REGNUM (REGNO (reg));
958 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
960 static bool
961 compare_reg_or_pc (rtx x, rtx y)
963 if (REG_P (x) && REG_P (y))
964 return REGNO (x) == REGNO (y);
965 return x == y;
968 /* Record SRC as being saved in DEST. DEST may be null to delete an
969 existing entry. SRC may be a register or PC_RTX. */
971 static void
972 record_reg_saved_in_reg (rtx dest, rtx src)
974 reg_saved_in_data *elt;
975 size_t i;
977 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
978 if (compare_reg_or_pc (elt->orig_reg, src))
980 if (dest == NULL)
981 cur_trace->regs_saved_in_regs.unordered_remove (i);
982 else
983 elt->saved_in_reg = dest;
984 return;
987 if (dest == NULL)
988 return;
990 reg_saved_in_data e = {src, dest};
991 cur_trace->regs_saved_in_regs.safe_push (e);
994 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
995 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
997 static void
998 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1000 queued_reg_save *q;
1001 queued_reg_save e = {reg, sreg, offset};
1002 size_t i;
1004 /* Duplicates waste space, but it's also necessary to remove them
1005 for correctness, since the queue gets output in reverse order. */
1006 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1007 if (compare_reg_or_pc (q->reg, reg))
1009 *q = e;
1010 return;
1013 queued_reg_saves.safe_push (e);
1016 /* Output all the entries in QUEUED_REG_SAVES. */
1018 static void
1019 dwarf2out_flush_queued_reg_saves (void)
1021 queued_reg_save *q;
1022 size_t i;
1024 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1026 unsigned int reg, sreg;
1028 record_reg_saved_in_reg (q->saved_reg, q->reg);
1030 if (q->reg == pc_rtx)
1031 reg = DWARF_FRAME_RETURN_COLUMN;
1032 else
1033 reg = dwf_regno (q->reg);
1034 if (q->saved_reg)
1035 sreg = dwf_regno (q->saved_reg);
1036 else
1037 sreg = INVALID_REGNUM;
1038 reg_save (reg, sreg, q->cfa_offset);
1041 queued_reg_saves.truncate (0);
1044 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1045 location for? Or, does it clobber a register which we've previously
1046 said that some other register is saved in, and for which we now
1047 have a new location for? */
1049 static bool
1050 clobbers_queued_reg_save (const_rtx insn)
1052 queued_reg_save *q;
1053 size_t iq;
1055 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1057 size_t ir;
1058 reg_saved_in_data *rir;
1060 if (modified_in_p (q->reg, insn))
1061 return true;
1063 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1064 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1065 && modified_in_p (rir->saved_in_reg, insn))
1066 return true;
1069 return false;
1072 /* What register, if any, is currently saved in REG? */
1074 static rtx
1075 reg_saved_in (rtx reg)
1077 unsigned int regn = REGNO (reg);
1078 queued_reg_save *q;
1079 reg_saved_in_data *rir;
1080 size_t i;
1082 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1083 if (q->saved_reg && regn == REGNO (q->saved_reg))
1084 return q->reg;
1086 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1087 if (regn == REGNO (rir->saved_in_reg))
1088 return rir->orig_reg;
1090 return NULL_RTX;
1093 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1095 static void
1096 dwarf2out_frame_debug_def_cfa (rtx pat)
1098 memset (cur_cfa, 0, sizeof (*cur_cfa));
1100 if (GET_CODE (pat) == PLUS)
1102 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1103 pat = XEXP (pat, 0);
1105 if (MEM_P (pat))
1107 cur_cfa->indirect = 1;
1108 pat = XEXP (pat, 0);
1109 if (GET_CODE (pat) == PLUS)
1111 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1112 pat = XEXP (pat, 0);
1115 /* ??? If this fails, we could be calling into the _loc functions to
1116 define a full expression. So far no port does that. */
1117 gcc_assert (REG_P (pat));
1118 cur_cfa->reg = dwf_regno (pat);
1121 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1123 static void
1124 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1126 rtx src, dest;
1128 gcc_assert (GET_CODE (pat) == SET);
1129 dest = XEXP (pat, 0);
1130 src = XEXP (pat, 1);
1132 switch (GET_CODE (src))
1134 case PLUS:
1135 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1136 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1137 break;
1139 case REG:
1140 break;
1142 default:
1143 gcc_unreachable ();
1146 cur_cfa->reg = dwf_regno (dest);
1147 gcc_assert (cur_cfa->indirect == 0);
1150 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1152 static void
1153 dwarf2out_frame_debug_cfa_offset (rtx set)
1155 HOST_WIDE_INT offset;
1156 rtx src, addr, span;
1157 unsigned int sregno;
1159 src = XEXP (set, 1);
1160 addr = XEXP (set, 0);
1161 gcc_assert (MEM_P (addr));
1162 addr = XEXP (addr, 0);
1164 /* As documented, only consider extremely simple addresses. */
1165 switch (GET_CODE (addr))
1167 case REG:
1168 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1169 offset = -cur_cfa->offset;
1170 break;
1171 case PLUS:
1172 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1173 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1174 break;
1175 default:
1176 gcc_unreachable ();
1179 if (src == pc_rtx)
1181 span = NULL;
1182 sregno = DWARF_FRAME_RETURN_COLUMN;
1184 else
1186 span = targetm.dwarf_register_span (src);
1187 sregno = dwf_regno (src);
1190 /* ??? We'd like to use queue_reg_save, but we need to come up with
1191 a different flushing heuristic for epilogues. */
1192 if (!span)
1193 reg_save (sregno, INVALID_REGNUM, offset);
1194 else
1196 /* We have a PARALLEL describing where the contents of SRC live.
1197 Adjust the offset for each piece of the PARALLEL. */
1198 HOST_WIDE_INT span_offset = offset;
1200 gcc_assert (GET_CODE (span) == PARALLEL);
1202 const int par_len = XVECLEN (span, 0);
1203 for (int par_index = 0; par_index < par_len; par_index++)
1205 rtx elem = XVECEXP (span, 0, par_index);
1206 sregno = dwf_regno (src);
1207 reg_save (sregno, INVALID_REGNUM, span_offset);
1208 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1213 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1215 static void
1216 dwarf2out_frame_debug_cfa_register (rtx set)
1218 rtx src, dest;
1219 unsigned sregno, dregno;
1221 src = XEXP (set, 1);
1222 dest = XEXP (set, 0);
1224 record_reg_saved_in_reg (dest, src);
1225 if (src == pc_rtx)
1226 sregno = DWARF_FRAME_RETURN_COLUMN;
1227 else
1228 sregno = dwf_regno (src);
1230 dregno = dwf_regno (dest);
1232 /* ??? We'd like to use queue_reg_save, but we need to come up with
1233 a different flushing heuristic for epilogues. */
1234 reg_save (sregno, dregno, 0);
1237 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1239 static void
1240 dwarf2out_frame_debug_cfa_expression (rtx set)
1242 rtx src, dest, span;
1243 dw_cfi_ref cfi = new_cfi ();
1244 unsigned regno;
1246 dest = SET_DEST (set);
1247 src = SET_SRC (set);
1249 gcc_assert (REG_P (src));
1250 gcc_assert (MEM_P (dest));
1252 span = targetm.dwarf_register_span (src);
1253 gcc_assert (!span);
1255 regno = dwf_regno (src);
1257 cfi->dw_cfi_opc = DW_CFA_expression;
1258 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1259 cfi->dw_cfi_oprnd2.dw_cfi_loc
1260 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1261 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1263 /* ??? We'd like to use queue_reg_save, were the interface different,
1264 and, as above, we could manage flushing for epilogues. */
1265 add_cfi (cfi);
1266 update_row_reg_save (cur_row, regno, cfi);
1269 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1270 note. */
1272 static void
1273 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1275 rtx dest = SET_DEST (set);
1276 gcc_assert (REG_P (dest));
1278 rtx span = targetm.dwarf_register_span (dest);
1279 gcc_assert (!span);
1281 rtx src = SET_SRC (set);
1282 dw_cfi_ref cfi = new_cfi ();
1283 cfi->dw_cfi_opc = DW_CFA_val_expression;
1284 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1285 cfi->dw_cfi_oprnd2.dw_cfi_loc
1286 = mem_loc_descriptor (src, GET_MODE (src),
1287 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1288 add_cfi (cfi);
1289 update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1292 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1294 static void
1295 dwarf2out_frame_debug_cfa_restore (rtx reg)
1297 gcc_assert (REG_P (reg));
1299 rtx span = targetm.dwarf_register_span (reg);
1300 if (!span)
1302 unsigned int regno = dwf_regno (reg);
1303 add_cfi_restore (regno);
1304 update_row_reg_save (cur_row, regno, NULL);
1306 else
1308 /* We have a PARALLEL describing where the contents of REG live.
1309 Restore the register for each piece of the PARALLEL. */
1310 gcc_assert (GET_CODE (span) == PARALLEL);
1312 const int par_len = XVECLEN (span, 0);
1313 for (int par_index = 0; par_index < par_len; par_index++)
1315 reg = XVECEXP (span, 0, par_index);
1316 gcc_assert (REG_P (reg));
1317 unsigned int regno = dwf_regno (reg);
1318 add_cfi_restore (regno);
1319 update_row_reg_save (cur_row, regno, NULL);
1324 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1325 ??? Perhaps we should note in the CIE where windows are saved (instead of
1326 assuming 0(cfa)) and what registers are in the window. */
1328 static void
1329 dwarf2out_frame_debug_cfa_window_save (void)
1331 dw_cfi_ref cfi = new_cfi ();
1333 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1334 add_cfi (cfi);
1337 /* Record call frame debugging information for an expression EXPR,
1338 which either sets SP or FP (adjusting how we calculate the frame
1339 address) or saves a register to the stack or another register.
1340 LABEL indicates the address of EXPR.
1342 This function encodes a state machine mapping rtxes to actions on
1343 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1344 users need not read the source code.
1346 The High-Level Picture
1348 Changes in the register we use to calculate the CFA: Currently we
1349 assume that if you copy the CFA register into another register, we
1350 should take the other one as the new CFA register; this seems to
1351 work pretty well. If it's wrong for some target, it's simple
1352 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1354 Changes in the register we use for saving registers to the stack:
1355 This is usually SP, but not always. Again, we deduce that if you
1356 copy SP into another register (and SP is not the CFA register),
1357 then the new register is the one we will be using for register
1358 saves. This also seems to work.
1360 Register saves: There's not much guesswork about this one; if
1361 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1362 register save, and the register used to calculate the destination
1363 had better be the one we think we're using for this purpose.
1364 It's also assumed that a copy from a call-saved register to another
1365 register is saving that register if RTX_FRAME_RELATED_P is set on
1366 that instruction. If the copy is from a call-saved register to
1367 the *same* register, that means that the register is now the same
1368 value as in the caller.
1370 Except: If the register being saved is the CFA register, and the
1371 offset is nonzero, we are saving the CFA, so we assume we have to
1372 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1373 the intent is to save the value of SP from the previous frame.
1375 In addition, if a register has previously been saved to a different
1376 register,
1378 Invariants / Summaries of Rules
1380 cfa current rule for calculating the CFA. It usually
1381 consists of a register and an offset. This is
1382 actually stored in *cur_cfa, but abbreviated
1383 for the purposes of this documentation.
1384 cfa_store register used by prologue code to save things to the stack
1385 cfa_store.offset is the offset from the value of
1386 cfa_store.reg to the actual CFA
1387 cfa_temp register holding an integral value. cfa_temp.offset
1388 stores the value, which will be used to adjust the
1389 stack pointer. cfa_temp is also used like cfa_store,
1390 to track stores to the stack via fp or a temp reg.
1392 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1393 with cfa.reg as the first operand changes the cfa.reg and its
1394 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1395 cfa_temp.offset.
1397 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1398 expression yielding a constant. This sets cfa_temp.reg
1399 and cfa_temp.offset.
1401 Rule 5: Create a new register cfa_store used to save items to the
1402 stack.
1404 Rules 10-14: Save a register to the stack. Define offset as the
1405 difference of the original location and cfa_store's
1406 location (or cfa_temp's location if cfa_temp is used).
1408 Rules 16-20: If AND operation happens on sp in prologue, we assume
1409 stack is realigned. We will use a group of DW_OP_XXX
1410 expressions to represent the location of the stored
1411 register instead of CFA+offset.
1413 The Rules
1415 "{a,b}" indicates a choice of a xor b.
1416 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1418 Rule 1:
1419 (set <reg1> <reg2>:cfa.reg)
1420 effects: cfa.reg = <reg1>
1421 cfa.offset unchanged
1422 cfa_temp.reg = <reg1>
1423 cfa_temp.offset = cfa.offset
1425 Rule 2:
1426 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1427 {<const_int>,<reg>:cfa_temp.reg}))
1428 effects: cfa.reg = sp if fp used
1429 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1430 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1431 if cfa_store.reg==sp
1433 Rule 3:
1434 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1435 effects: cfa.reg = fp
1436 cfa_offset += +/- <const_int>
1438 Rule 4:
1439 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1440 constraints: <reg1> != fp
1441 <reg1> != sp
1442 effects: cfa.reg = <reg1>
1443 cfa_temp.reg = <reg1>
1444 cfa_temp.offset = cfa.offset
1446 Rule 5:
1447 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1448 constraints: <reg1> != fp
1449 <reg1> != sp
1450 effects: cfa_store.reg = <reg1>
1451 cfa_store.offset = cfa.offset - cfa_temp.offset
1453 Rule 6:
1454 (set <reg> <const_int>)
1455 effects: cfa_temp.reg = <reg>
1456 cfa_temp.offset = <const_int>
1458 Rule 7:
1459 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1460 effects: cfa_temp.reg = <reg1>
1461 cfa_temp.offset |= <const_int>
1463 Rule 8:
1464 (set <reg> (high <exp>))
1465 effects: none
1467 Rule 9:
1468 (set <reg> (lo_sum <exp> <const_int>))
1469 effects: cfa_temp.reg = <reg>
1470 cfa_temp.offset = <const_int>
1472 Rule 10:
1473 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1474 effects: cfa_store.offset -= <const_int>
1475 cfa.offset = cfa_store.offset if cfa.reg == sp
1476 cfa.reg = sp
1477 cfa.base_offset = -cfa_store.offset
1479 Rule 11:
1480 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1481 effects: cfa_store.offset += -/+ mode_size(mem)
1482 cfa.offset = cfa_store.offset if cfa.reg == sp
1483 cfa.reg = sp
1484 cfa.base_offset = -cfa_store.offset
1486 Rule 12:
1487 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1489 <reg2>)
1490 effects: cfa.reg = <reg1>
1491 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1493 Rule 13:
1494 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1495 effects: cfa.reg = <reg1>
1496 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1498 Rule 14:
1499 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1500 effects: cfa.reg = <reg1>
1501 cfa.base_offset = -cfa_temp.offset
1502 cfa_temp.offset -= mode_size(mem)
1504 Rule 15:
1505 (set <reg> {unspec, unspec_volatile})
1506 effects: target-dependent
1508 Rule 16:
1509 (set sp (and: sp <const_int>))
1510 constraints: cfa_store.reg == sp
1511 effects: cfun->fde.stack_realign = 1
1512 cfa_store.offset = 0
1513 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1515 Rule 17:
1516 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1517 effects: cfa_store.offset += -/+ mode_size(mem)
1519 Rule 18:
1520 (set (mem ({pre_inc, pre_dec} sp)) fp)
1521 constraints: fde->stack_realign == 1
1522 effects: cfa_store.offset = 0
1523 cfa.reg != HARD_FRAME_POINTER_REGNUM
1525 Rule 19:
1526 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1527 constraints: fde->stack_realign == 1
1528 && cfa.offset == 0
1529 && cfa.indirect == 0
1530 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1531 effects: Use DW_CFA_def_cfa_expression to define cfa
1532 cfa.reg == fde->drap_reg */
1534 static void
1535 dwarf2out_frame_debug_expr (rtx expr)
1537 rtx src, dest, span;
1538 HOST_WIDE_INT offset;
1539 dw_fde_ref fde;
1541 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1542 the PARALLEL independently. The first element is always processed if
1543 it is a SET. This is for backward compatibility. Other elements
1544 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1545 flag is set in them. */
1546 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1548 int par_index;
1549 int limit = XVECLEN (expr, 0);
1550 rtx elem;
1552 /* PARALLELs have strict read-modify-write semantics, so we
1553 ought to evaluate every rvalue before changing any lvalue.
1554 It's cumbersome to do that in general, but there's an
1555 easy approximation that is enough for all current users:
1556 handle register saves before register assignments. */
1557 if (GET_CODE (expr) == PARALLEL)
1558 for (par_index = 0; par_index < limit; par_index++)
1560 elem = XVECEXP (expr, 0, par_index);
1561 if (GET_CODE (elem) == SET
1562 && MEM_P (SET_DEST (elem))
1563 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1564 dwarf2out_frame_debug_expr (elem);
1567 for (par_index = 0; par_index < limit; par_index++)
1569 elem = XVECEXP (expr, 0, par_index);
1570 if (GET_CODE (elem) == SET
1571 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1572 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1573 dwarf2out_frame_debug_expr (elem);
1575 return;
1578 gcc_assert (GET_CODE (expr) == SET);
1580 src = SET_SRC (expr);
1581 dest = SET_DEST (expr);
1583 if (REG_P (src))
1585 rtx rsi = reg_saved_in (src);
1586 if (rsi)
1587 src = rsi;
1590 fde = cfun->fde;
1592 switch (GET_CODE (dest))
1594 case REG:
1595 switch (GET_CODE (src))
1597 /* Setting FP from SP. */
1598 case REG:
1599 if (cur_cfa->reg == dwf_regno (src))
1601 /* Rule 1 */
1602 /* Update the CFA rule wrt SP or FP. Make sure src is
1603 relative to the current CFA register.
1605 We used to require that dest be either SP or FP, but the
1606 ARM copies SP to a temporary register, and from there to
1607 FP. So we just rely on the backends to only set
1608 RTX_FRAME_RELATED_P on appropriate insns. */
1609 cur_cfa->reg = dwf_regno (dest);
1610 cur_trace->cfa_temp.reg = cur_cfa->reg;
1611 cur_trace->cfa_temp.offset = cur_cfa->offset;
1613 else
1615 /* Saving a register in a register. */
1616 gcc_assert (!fixed_regs [REGNO (dest)]
1617 /* For the SPARC and its register window. */
1618 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1620 /* After stack is aligned, we can only save SP in FP
1621 if drap register is used. In this case, we have
1622 to restore stack pointer with the CFA value and we
1623 don't generate this DWARF information. */
1624 if (fde
1625 && fde->stack_realign
1626 && REGNO (src) == STACK_POINTER_REGNUM)
1627 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1628 && fde->drap_reg != INVALID_REGNUM
1629 && cur_cfa->reg != dwf_regno (src));
1630 else
1631 queue_reg_save (src, dest, 0);
1633 break;
1635 case PLUS:
1636 case MINUS:
1637 case LO_SUM:
1638 if (dest == stack_pointer_rtx)
1640 /* Rule 2 */
1641 /* Adjusting SP. */
1642 switch (GET_CODE (XEXP (src, 1)))
1644 case CONST_INT:
1645 offset = INTVAL (XEXP (src, 1));
1646 break;
1647 case REG:
1648 gcc_assert (dwf_regno (XEXP (src, 1))
1649 == cur_trace->cfa_temp.reg);
1650 offset = cur_trace->cfa_temp.offset;
1651 break;
1652 default:
1653 gcc_unreachable ();
1656 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1658 /* Restoring SP from FP in the epilogue. */
1659 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1660 cur_cfa->reg = dw_stack_pointer_regnum;
1662 else if (GET_CODE (src) == LO_SUM)
1663 /* Assume we've set the source reg of the LO_SUM from sp. */
1665 else
1666 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1668 if (GET_CODE (src) != MINUS)
1669 offset = -offset;
1670 if (cur_cfa->reg == dw_stack_pointer_regnum)
1671 cur_cfa->offset += offset;
1672 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1673 cur_trace->cfa_store.offset += offset;
1675 else if (dest == hard_frame_pointer_rtx)
1677 /* Rule 3 */
1678 /* Either setting the FP from an offset of the SP,
1679 or adjusting the FP */
1680 gcc_assert (frame_pointer_needed);
1682 gcc_assert (REG_P (XEXP (src, 0))
1683 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1684 && CONST_INT_P (XEXP (src, 1)));
1685 offset = INTVAL (XEXP (src, 1));
1686 if (GET_CODE (src) != MINUS)
1687 offset = -offset;
1688 cur_cfa->offset += offset;
1689 cur_cfa->reg = dw_frame_pointer_regnum;
1691 else
1693 gcc_assert (GET_CODE (src) != MINUS);
1695 /* Rule 4 */
1696 if (REG_P (XEXP (src, 0))
1697 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1698 && CONST_INT_P (XEXP (src, 1)))
1700 /* Setting a temporary CFA register that will be copied
1701 into the FP later on. */
1702 offset = - INTVAL (XEXP (src, 1));
1703 cur_cfa->offset += offset;
1704 cur_cfa->reg = dwf_regno (dest);
1705 /* Or used to save regs to the stack. */
1706 cur_trace->cfa_temp.reg = cur_cfa->reg;
1707 cur_trace->cfa_temp.offset = cur_cfa->offset;
1710 /* Rule 5 */
1711 else if (REG_P (XEXP (src, 0))
1712 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1713 && XEXP (src, 1) == stack_pointer_rtx)
1715 /* Setting a scratch register that we will use instead
1716 of SP for saving registers to the stack. */
1717 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1718 cur_trace->cfa_store.reg = dwf_regno (dest);
1719 cur_trace->cfa_store.offset
1720 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1723 /* Rule 9 */
1724 else if (GET_CODE (src) == LO_SUM
1725 && CONST_INT_P (XEXP (src, 1)))
1727 cur_trace->cfa_temp.reg = dwf_regno (dest);
1728 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1730 else
1731 gcc_unreachable ();
1733 break;
1735 /* Rule 6 */
1736 case CONST_INT:
1737 cur_trace->cfa_temp.reg = dwf_regno (dest);
1738 cur_trace->cfa_temp.offset = INTVAL (src);
1739 break;
1741 /* Rule 7 */
1742 case IOR:
1743 gcc_assert (REG_P (XEXP (src, 0))
1744 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1745 && CONST_INT_P (XEXP (src, 1)));
1747 cur_trace->cfa_temp.reg = dwf_regno (dest);
1748 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1749 break;
1751 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1752 which will fill in all of the bits. */
1753 /* Rule 8 */
1754 case HIGH:
1755 break;
1757 /* Rule 15 */
1758 case UNSPEC:
1759 case UNSPEC_VOLATILE:
1760 /* All unspecs should be represented by REG_CFA_* notes. */
1761 gcc_unreachable ();
1762 return;
1764 /* Rule 16 */
1765 case AND:
1766 /* If this AND operation happens on stack pointer in prologue,
1767 we assume the stack is realigned and we extract the
1768 alignment. */
1769 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1771 /* We interpret reg_save differently with stack_realign set.
1772 Thus we must flush whatever we have queued first. */
1773 dwarf2out_flush_queued_reg_saves ();
1775 gcc_assert (cur_trace->cfa_store.reg
1776 == dwf_regno (XEXP (src, 0)));
1777 fde->stack_realign = 1;
1778 fde->stack_realignment = INTVAL (XEXP (src, 1));
1779 cur_trace->cfa_store.offset = 0;
1781 if (cur_cfa->reg != dw_stack_pointer_regnum
1782 && cur_cfa->reg != dw_frame_pointer_regnum)
1783 fde->drap_reg = cur_cfa->reg;
1785 return;
1787 default:
1788 gcc_unreachable ();
1790 break;
1792 case MEM:
1794 /* Saving a register to the stack. Make sure dest is relative to the
1795 CFA register. */
1796 switch (GET_CODE (XEXP (dest, 0)))
1798 /* Rule 10 */
1799 /* With a push. */
1800 case PRE_MODIFY:
1801 case POST_MODIFY:
1802 /* We can't handle variable size modifications. */
1803 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1804 == CONST_INT);
1805 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1807 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1808 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1810 cur_trace->cfa_store.offset += offset;
1811 if (cur_cfa->reg == dw_stack_pointer_regnum)
1812 cur_cfa->offset = cur_trace->cfa_store.offset;
1814 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1815 offset -= cur_trace->cfa_store.offset;
1816 else
1817 offset = -cur_trace->cfa_store.offset;
1818 break;
1820 /* Rule 11 */
1821 case PRE_INC:
1822 case PRE_DEC:
1823 case POST_DEC:
1824 offset = GET_MODE_SIZE (GET_MODE (dest));
1825 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1826 offset = -offset;
1828 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1829 == STACK_POINTER_REGNUM)
1830 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1832 cur_trace->cfa_store.offset += offset;
1834 /* Rule 18: If stack is aligned, we will use FP as a
1835 reference to represent the address of the stored
1836 regiser. */
1837 if (fde
1838 && fde->stack_realign
1839 && REG_P (src)
1840 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1842 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1843 cur_trace->cfa_store.offset = 0;
1846 if (cur_cfa->reg == dw_stack_pointer_regnum)
1847 cur_cfa->offset = cur_trace->cfa_store.offset;
1849 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1850 offset += -cur_trace->cfa_store.offset;
1851 else
1852 offset = -cur_trace->cfa_store.offset;
1853 break;
1855 /* Rule 12 */
1856 /* With an offset. */
1857 case PLUS:
1858 case MINUS:
1859 case LO_SUM:
1861 unsigned int regno;
1863 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1864 && REG_P (XEXP (XEXP (dest, 0), 0)));
1865 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1866 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1867 offset = -offset;
1869 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1871 if (cur_cfa->reg == regno)
1872 offset -= cur_cfa->offset;
1873 else if (cur_trace->cfa_store.reg == regno)
1874 offset -= cur_trace->cfa_store.offset;
1875 else
1877 gcc_assert (cur_trace->cfa_temp.reg == regno);
1878 offset -= cur_trace->cfa_temp.offset;
1881 break;
1883 /* Rule 13 */
1884 /* Without an offset. */
1885 case REG:
1887 unsigned int regno = dwf_regno (XEXP (dest, 0));
1889 if (cur_cfa->reg == regno)
1890 offset = -cur_cfa->offset;
1891 else if (cur_trace->cfa_store.reg == regno)
1892 offset = -cur_trace->cfa_store.offset;
1893 else
1895 gcc_assert (cur_trace->cfa_temp.reg == regno);
1896 offset = -cur_trace->cfa_temp.offset;
1899 break;
1901 /* Rule 14 */
1902 case POST_INC:
1903 gcc_assert (cur_trace->cfa_temp.reg
1904 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1905 offset = -cur_trace->cfa_temp.offset;
1906 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1907 break;
1909 default:
1910 gcc_unreachable ();
1913 /* Rule 17 */
1914 /* If the source operand of this MEM operation is a memory,
1915 we only care how much stack grew. */
1916 if (MEM_P (src))
1917 break;
1919 if (REG_P (src)
1920 && REGNO (src) != STACK_POINTER_REGNUM
1921 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1922 && dwf_regno (src) == cur_cfa->reg)
1924 /* We're storing the current CFA reg into the stack. */
1926 if (cur_cfa->offset == 0)
1928 /* Rule 19 */
1929 /* If stack is aligned, putting CFA reg into stack means
1930 we can no longer use reg + offset to represent CFA.
1931 Here we use DW_CFA_def_cfa_expression instead. The
1932 result of this expression equals to the original CFA
1933 value. */
1934 if (fde
1935 && fde->stack_realign
1936 && cur_cfa->indirect == 0
1937 && cur_cfa->reg != dw_frame_pointer_regnum)
1939 gcc_assert (fde->drap_reg == cur_cfa->reg);
1941 cur_cfa->indirect = 1;
1942 cur_cfa->reg = dw_frame_pointer_regnum;
1943 cur_cfa->base_offset = offset;
1944 cur_cfa->offset = 0;
1946 fde->drap_reg_saved = 1;
1947 break;
1950 /* If the source register is exactly the CFA, assume
1951 we're saving SP like any other register; this happens
1952 on the ARM. */
1953 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1954 break;
1956 else
1958 /* Otherwise, we'll need to look in the stack to
1959 calculate the CFA. */
1960 rtx x = XEXP (dest, 0);
1962 if (!REG_P (x))
1963 x = XEXP (x, 0);
1964 gcc_assert (REG_P (x));
1966 cur_cfa->reg = dwf_regno (x);
1967 cur_cfa->base_offset = offset;
1968 cur_cfa->indirect = 1;
1969 break;
1973 if (REG_P (src))
1974 span = targetm.dwarf_register_span (src);
1975 else
1976 span = NULL;
1978 if (!span)
1979 queue_reg_save (src, NULL_RTX, offset);
1980 else
1982 /* We have a PARALLEL describing where the contents of SRC live.
1983 Queue register saves for each piece of the PARALLEL. */
1984 HOST_WIDE_INT span_offset = offset;
1986 gcc_assert (GET_CODE (span) == PARALLEL);
1988 const int par_len = XVECLEN (span, 0);
1989 for (int par_index = 0; par_index < par_len; par_index++)
1991 rtx elem = XVECEXP (span, 0, par_index);
1992 queue_reg_save (elem, NULL_RTX, span_offset);
1993 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1996 break;
1998 default:
1999 gcc_unreachable ();
2003 /* Record call frame debugging information for INSN, which either sets
2004 SP or FP (adjusting how we calculate the frame address) or saves a
2005 register to the stack. */
2007 static void
2008 dwarf2out_frame_debug (rtx_insn *insn)
2010 rtx note, n, pat;
2011 bool handled_one = false;
2013 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2014 switch (REG_NOTE_KIND (note))
2016 case REG_FRAME_RELATED_EXPR:
2017 pat = XEXP (note, 0);
2018 goto do_frame_expr;
2020 case REG_CFA_DEF_CFA:
2021 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2022 handled_one = true;
2023 break;
2025 case REG_CFA_ADJUST_CFA:
2026 n = XEXP (note, 0);
2027 if (n == NULL)
2029 n = PATTERN (insn);
2030 if (GET_CODE (n) == PARALLEL)
2031 n = XVECEXP (n, 0, 0);
2033 dwarf2out_frame_debug_adjust_cfa (n);
2034 handled_one = true;
2035 break;
2037 case REG_CFA_OFFSET:
2038 n = XEXP (note, 0);
2039 if (n == NULL)
2040 n = single_set (insn);
2041 dwarf2out_frame_debug_cfa_offset (n);
2042 handled_one = true;
2043 break;
2045 case REG_CFA_REGISTER:
2046 n = XEXP (note, 0);
2047 if (n == NULL)
2049 n = PATTERN (insn);
2050 if (GET_CODE (n) == PARALLEL)
2051 n = XVECEXP (n, 0, 0);
2053 dwarf2out_frame_debug_cfa_register (n);
2054 handled_one = true;
2055 break;
2057 case REG_CFA_EXPRESSION:
2058 case REG_CFA_VAL_EXPRESSION:
2059 n = XEXP (note, 0);
2060 if (n == NULL)
2061 n = single_set (insn);
2063 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2064 dwarf2out_frame_debug_cfa_expression (n);
2065 else
2066 dwarf2out_frame_debug_cfa_val_expression (n);
2068 handled_one = true;
2069 break;
2071 case REG_CFA_RESTORE:
2072 n = XEXP (note, 0);
2073 if (n == NULL)
2075 n = PATTERN (insn);
2076 if (GET_CODE (n) == PARALLEL)
2077 n = XVECEXP (n, 0, 0);
2078 n = XEXP (n, 0);
2080 dwarf2out_frame_debug_cfa_restore (n);
2081 handled_one = true;
2082 break;
2084 case REG_CFA_SET_VDRAP:
2085 n = XEXP (note, 0);
2086 if (REG_P (n))
2088 dw_fde_ref fde = cfun->fde;
2089 if (fde)
2091 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2092 if (REG_P (n))
2093 fde->vdrap_reg = dwf_regno (n);
2096 handled_one = true;
2097 break;
2099 case REG_CFA_TOGGLE_RA_MANGLE:
2100 case REG_CFA_WINDOW_SAVE:
2101 /* We overload both of these operations onto the same DWARF opcode. */
2102 dwarf2out_frame_debug_cfa_window_save ();
2103 handled_one = true;
2104 break;
2106 case REG_CFA_FLUSH_QUEUE:
2107 /* The actual flush happens elsewhere. */
2108 handled_one = true;
2109 break;
2111 default:
2112 break;
2115 if (!handled_one)
2117 pat = PATTERN (insn);
2118 do_frame_expr:
2119 dwarf2out_frame_debug_expr (pat);
2121 /* Check again. A parallel can save and update the same register.
2122 We could probably check just once, here, but this is safer than
2123 removing the check at the start of the function. */
2124 if (clobbers_queued_reg_save (pat))
2125 dwarf2out_flush_queued_reg_saves ();
2129 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2131 static void
2132 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2134 size_t i, n_old, n_new, n_max;
2135 dw_cfi_ref cfi;
2137 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2138 add_cfi (new_row->cfa_cfi);
2139 else
2141 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2142 if (cfi)
2143 add_cfi (cfi);
2146 n_old = vec_safe_length (old_row->reg_save);
2147 n_new = vec_safe_length (new_row->reg_save);
2148 n_max = MAX (n_old, n_new);
2150 for (i = 0; i < n_max; ++i)
2152 dw_cfi_ref r_old = NULL, r_new = NULL;
2154 if (i < n_old)
2155 r_old = (*old_row->reg_save)[i];
2156 if (i < n_new)
2157 r_new = (*new_row->reg_save)[i];
2159 if (r_old == r_new)
2161 else if (r_new == NULL)
2162 add_cfi_restore (i);
2163 else if (!cfi_equal_p (r_old, r_new))
2164 add_cfi (r_new);
2168 /* Examine CFI and return true if a cfi label and set_loc is needed
2169 beforehand. Even when generating CFI assembler instructions, we
2170 still have to add the cfi to the list so that lookup_cfa_1 works
2171 later on. When -g2 and above we even need to force emitting of
2172 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2173 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2174 and so don't use convert_cfa_to_fb_loc_list. */
2176 static bool
2177 cfi_label_required_p (dw_cfi_ref cfi)
2179 if (!dwarf2out_do_cfi_asm ())
2180 return true;
2182 if (dwarf_version == 2
2183 && debug_info_level > DINFO_LEVEL_TERSE
2184 && (write_symbols == DWARF2_DEBUG
2185 || write_symbols == VMS_AND_DWARF2_DEBUG))
2187 switch (cfi->dw_cfi_opc)
2189 case DW_CFA_def_cfa_offset:
2190 case DW_CFA_def_cfa_offset_sf:
2191 case DW_CFA_def_cfa_register:
2192 case DW_CFA_def_cfa:
2193 case DW_CFA_def_cfa_sf:
2194 case DW_CFA_def_cfa_expression:
2195 case DW_CFA_restore_state:
2196 return true;
2197 default:
2198 return false;
2201 return false;
2204 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2205 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2206 necessary. */
2207 static void
2208 add_cfis_to_fde (void)
2210 dw_fde_ref fde = cfun->fde;
2211 rtx_insn *insn, *next;
2213 for (insn = get_insns (); insn; insn = next)
2215 next = NEXT_INSN (insn);
2217 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2218 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2220 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2222 bool required = cfi_label_required_p (NOTE_CFI (insn));
2223 while (next)
2224 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2226 required |= cfi_label_required_p (NOTE_CFI (next));
2227 next = NEXT_INSN (next);
2229 else if (active_insn_p (next)
2230 || (NOTE_P (next) && (NOTE_KIND (next)
2231 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2232 break;
2233 else
2234 next = NEXT_INSN (next);
2235 if (required)
2237 int num = dwarf2out_cfi_label_num;
2238 const char *label = dwarf2out_cfi_label ();
2239 dw_cfi_ref xcfi;
2241 /* Set the location counter to the new label. */
2242 xcfi = new_cfi ();
2243 xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
2244 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2245 vec_safe_push (fde->dw_fde_cfi, xcfi);
2247 rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2248 NOTE_LABEL_NUMBER (tmp) = num;
2253 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2254 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2255 insn = NEXT_INSN (insn);
2257 while (insn != next);
2262 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2264 /* If LABEL is the start of a trace, then initialize the state of that
2265 trace from CUR_TRACE and CUR_ROW. */
static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  /* START must be a known trace head; ORIGIN is the branch/call insn that
     reaches it, or NULL for a fallthru edge.  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    fprintf (dump_file, "  saw edge from trace %u to %u (via %s %d)\n",
	     cur_trace->id, ti->id,
	     (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	     (origin ? INSN_UID (origin) : 0));

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {
      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }
	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2329 /* Similarly, but handle the args_size and CFA reset across EH
2330 and non-local goto edges. */
static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  /* Fast path: with no pending argument pushes the abnormal edge is
     no different from a normal one.  */
  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  /* Temporarily pretend the args_size has been popped: EH and non-local
     goto edges unwind any pushed arguments.  If the CFA is based on the
     stack pointer, the CFA offset must be adjusted to match.  */
  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Restore the state of the current trace for the fallthru path.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2365 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2366 /* ??? Sadly, this is in large part a duplicate of make_edges. */
static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled via their CALL_INSN below, not here.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per case label in the dispatch table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary direct jump.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* A delay-slot SEQUENCE: recurse on each member; the EH check
	 below is done per member, so return here.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2448 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
static void
scan_insn_after (rtx_insn *insn)
{
  /* Frame-related effects first, then any REG_ARGS_SIZE adjustment;
     both update the current row/trace state.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
2458 /* Scan the trace beginning at INSN and create the CFI notes for the
2459 instructions therein. */
static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The end state starts as a copy of the begin state and is mutated
     in place as we scan.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  /* Publish the per-trace state through the file-scope globals that the
     scanning helpers operate on.  */
  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo the target-only effects for the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the globals we published above.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2638 /* Scan the function and create the initial set of CFI notes. */
2640 static void
2641 create_cfi_notes (void)
2643 dw_trace_info *ti;
2645 gcc_checking_assert (!queued_reg_saves.exists ());
2646 gcc_checking_assert (!trace_work_list.exists ());
2648 /* Always begin at the entry trace. */
2649 ti = &trace_info[0];
2650 scan_trace (ti);
2652 while (!trace_work_list.is_empty ())
2654 ti = trace_work_list.pop ();
2655 scan_trace (ti);
2658 queued_reg_saves.release ();
2659 trace_work_list.release ();
2662 /* Return the insn before the first NOTE_INSN_CFI after START. */
2664 static rtx_insn *
2665 before_next_cfi_note (rtx_insn *start)
2667 rtx_insn *prev = start;
2668 while (start)
2670 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2671 return prev;
2672 prev = start;
2673 start = NEXT_INSN (start);
2675 gcc_unreachable ();
2678 /* Insert CFI notes between traces to properly change state between them. */
static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* With the restore in place, the delta is now computed
		 against the state at the start of the previous trace.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (section switch) restarts from zero args_size.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2805 /* Set up the pseudo-cfg of instruction traces, as described at the
2806 block comment at the top of the file. */
static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	saw_barrier = true;
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* We should have just seen a barrier.  */
	  gcc_assert (saw_barrier);
	  switch_sections = true;
	}
      /* Watch out for save_point notes between basic blocks.
	 In particular, a note after a barrier.  Do not record these,
	 delaying trace creation until the label.  */
      else if (save_point_p (insn)
	       && (LABEL_P (insn) || !saw_barrier))
	{
	  memset (&ti, 0, sizeof (ti));
	  ti.head = insn;
	  ti.switch_sections = switch_sections;
	  ti.id = trace_info.length ();
	  trace_info.safe_push (ti);

	  saw_barrier = false;
	  switch_sections = false;
	}
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
		 tp->switch_sections ? " (section switch)" : "");

      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}
2880 /* Record the initial position of the return address. RTL is
2881 INCOMING_RETURN_ADDR_RTX. */
static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decode the address to find the SP offset.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* When the RA arrives in a register other than the return column,
	 record the register-to-register equivalence as well.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);

      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
/* Compute the CIE row state shared by every FDE in this translation
   unit: the incoming CFA and the initial location of the return
   address.  Runs once; results live in cie_cfi_vec / cie_cfi_row.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Tear down the temporary global state used above.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
2994 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2995 state at each location within the function. These notes will be
2996 emitted during pass_final. */
static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3033 /* Convert a DWARF call frame info. operation to its string name */
3035 static const char *
3036 dwarf_cfi_name (unsigned int cfi_opc)
3038 const char *name = get_DW_CFA_name (cfi_opc);
3040 if (name != NULL)
3041 return name;
3043 return "DW_CFA_<unknown>";
3046 /* This routine will generate the correct assembly data for a location
3047 description based on a cfi entry with a complex address. */
static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  /* DW_CFA_expression/val_expression carry a register operand before the
     expression; DW_CFA_def_cfa_expression has the expression only.  */
  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
3074 /* Similar, but used for .cfi_escape. */
static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  /* Same operand layout as output_cfa_loc, but emitted as the raw
     comma-separated byte list used inside a .cfi_escape directive.  */
  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
3102 /* Output a Call Frame Information opcode and its operand(s). */
void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three "primary" opcodes encode an operand in the low 6 bits of
     the opcode byte itself; everything else is a one-byte opcode
     followed by explicit operands.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3244 /* Similar, but do it via assembler directives instead. */
void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No assembler directive exists; emit the raw bytes via .cfi_escape
	 when writing real assembly, a readable form for dumps.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3364 void
3365 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3367 if (dwarf2out_do_cfi_asm ())
3368 output_cfi_directive (asm_out_file, cfi);
/* Dump ROW to F in .cfi_* directive form, for debugging.  */

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  /* Synthesize a def_cfa CFI if the row doesn't carry one explicitly.  */
  cfi = row->cfa_cfi;
  if (!cfi)
    {
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}
/* Entry point for use from a debugger; dumps ROW to stderr.  */
void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   It caches the decision made by dwarf2out_do_cfi_asm.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3405 /* Decide whether to emit EH frame unwind information for the current
3406 translation unit. */
3408 bool
3409 dwarf2out_do_eh_frame (void)
3411 return
3412 (flag_unwind_tables || flag_exceptions)
3413 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3416 /* Decide whether we want to emit frame unwind information for the current
3417 translation unit. */
3419 bool
3420 dwarf2out_do_frame (void)
3422 /* We want to emit correct CFA location expressions or lists, so we
3423 have to return true if we're going to output debug info, even if
3424 we're not going to output frame or unwind info. */
3425 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3426 return true;
3428 if (saved_do_cfi_asm > 0)
3429 return true;
3431 if (targetm.debug_unwind_info () == UI_DWARF2)
3432 return true;
3434 if (dwarf2out_do_eh_frame ())
3435 return true;
3437 return false;
3440 /* Decide whether to emit frame unwind via assembler directives. */
3442 bool
3443 dwarf2out_do_cfi_asm (void)
3445 int enc;
3447 if (saved_do_cfi_asm != 0)
3448 return saved_do_cfi_asm > 0;
3450 /* Assume failure for a moment. */
3451 saved_do_cfi_asm = -1;
3453 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3454 return false;
3455 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3456 return false;
3458 /* Make sure the personality encoding is one the assembler can support.
3459 In particular, aligned addresses can't be handled. */
3460 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3461 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3462 return false;
3463 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3464 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3465 return false;
3467 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3468 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3469 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
3470 return false;
3472 /* Success! */
3473 saved_do_cfi_asm = 1;
3474 return true;
3477 namespace {
3479 const pass_data pass_data_dwarf2_frame =
3481 RTL_PASS, /* type */
3482 "dwarf2", /* name */
3483 OPTGROUP_NONE, /* optinfo_flags */
3484 TV_FINAL, /* tv_id */
3485 0, /* properties_required */
3486 0, /* properties_provided */
3487 0, /* properties_destroyed */
3488 0, /* todo_flags_start */
3489 0, /* todo_flags_finish */
3492 class pass_dwarf2_frame : public rtl_opt_pass
3494 public:
3495 pass_dwarf2_frame (gcc::context *ctxt)
3496 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3499 /* opt_pass methods: */
3500 virtual bool gate (function *);
3501 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3503 }; // class pass_dwarf2_frame
3505 bool
3506 pass_dwarf2_frame::gate (function *)
3508 /* Targets which still implement the prologue in assembler text
3509 cannot use the generic dwarf2 unwinding. */
3510 if (!targetm.have_prologue ())
3511 return false;
3513 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3514 from the optimized shrink-wrapping annotations that we will compute.
3515 For now, only produce the CFI notes for dwarf2. */
3516 return dwarf2out_do_frame ();
3519 } // anon namespace
3521 rtl_opt_pass *
3522 make_pass_dwarf2_frame (gcc::context *ctxt)
3524 return new pass_dwarf2_frame (ctxt);
3527 #include "gt-dwarf2cfi.h"