/* gcc/dwarf2cfi.c — from official-gcc.git,
   blob 07e6a5a2887f6014b01aae0a2f69bfe92f371f3e
   (tree as of PR tree-optimization/84740).  */
/* Dwarf2 Call Frame Information helper routines.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
54 #endif
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
58 #endif
60 /* A collected description of an entire row of the abstract CFI table. */
61 struct GTY(()) dw_cfi_row
63 /* The expression that computes the CFA, expressed in two different ways.
64 The CFA member for the simple cases, and the full CFI expression for
65 the complex cases. The later will be a DW_CFA_cfa_expression. */
66 dw_cfa_location cfa;
67 dw_cfi_ref cfa_cfi;
69 /* The expressions for any register column that is saved. */
70 cfi_vec reg_save;
73 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
74 struct GTY(()) reg_saved_in_data {
75 rtx orig_reg;
76 rtx saved_in_reg;
80 /* Since we no longer have a proper CFG, we're going to create a facsimile
81 of one on the fly while processing the frame-related insns.
83 We create dw_trace_info structures for each extended basic block beginning
84 and ending at a "save point". Save points are labels, barriers, certain
85 notes, and of course the beginning and end of the function.
87 As we encounter control transfer insns, we propagate the "current"
88 row state across the edges to the starts of traces. When checking is
89 enabled, we validate that we propagate the same data from all sources.
91 All traces are members of the TRACE_INFO array, in the order in which
92 they appear in the instruction stream.
94 All save points are present in the TRACE_INDEX hash, mapping the insn
95 starting a trace to the dw_trace_info describing the trace. */
97 struct dw_trace_info
99 /* The insn that begins the trace. */
100 rtx_insn *head;
102 /* The row state at the beginning and end of the trace. */
103 dw_cfi_row *beg_row, *end_row;
105 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
106 while scanning insns. However, the args_size value is irrelevant at
107 any point except can_throw_internal_p insns. Therefore the "delay"
108 sizes the values that must actually be emitted for this trace. */
109 poly_int64_pod beg_true_args_size, end_true_args_size;
110 poly_int64_pod beg_delay_args_size, end_delay_args_size;
112 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
113 rtx_insn *eh_head;
115 /* The following variables contain data used in interpreting frame related
116 expressions. These are not part of the "real" row state as defined by
117 Dwarf, but it seems like they need to be propagated into a trace in case
118 frame related expressions have been sunk. */
119 /* ??? This seems fragile. These variables are fragments of a larger
120 expression. If we do not keep the entire expression together, we risk
121 not being able to put it together properly. Consider forcing targets
122 to generate self-contained expressions and dropping all of the magic
123 interpretation code in this file. Or at least refusing to shrink wrap
124 any frame related insn that doesn't contain a complete expression. */
126 /* The register used for saving registers to the stack, and its offset
127 from the CFA. */
128 dw_cfa_location cfa_store;
130 /* A temporary register holding an integral value used in adjusting SP
131 or setting up the store_reg. The "offset" field holds the integer
132 value, not an offset. */
133 dw_cfa_location cfa_temp;
135 /* A set of registers saved in other registers. This is the inverse of
136 the row->reg_save info, if the entry is a DW_CFA_register. This is
137 implemented as a flat array because it normally contains zero or 1
138 entry, depending on the target. IA-64 is the big spender here, using
139 a maximum of 5 entries. */
140 vec<reg_saved_in_data> regs_saved_in_regs;
142 /* An identifier for this trace. Used only for debugging dumps. */
143 unsigned id;
145 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
146 bool switch_sections;
148 /* True if we've seen different values incoming to beg_true_args_size. */
149 bool args_size_undefined;
153 /* Hashtable helpers. */
155 struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
157 static inline hashval_t hash (const dw_trace_info *);
158 static inline bool equal (const dw_trace_info *, const dw_trace_info *);
161 inline hashval_t
162 trace_info_hasher::hash (const dw_trace_info *ti)
164 return INSN_UID (ti->head);
167 inline bool
168 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
170 return a->head == b->head;
174 /* The variables making up the pseudo-cfg, as described above. */
175 static vec<dw_trace_info> trace_info;
176 static vec<dw_trace_info *> trace_work_list;
177 static hash_table<trace_info_hasher> *trace_index;
179 /* A vector of call frame insns for the CIE. */
180 cfi_vec cie_cfi_vec;
182 /* The state of the first row of the FDE table, which includes the
183 state provided by the CIE. */
184 static GTY(()) dw_cfi_row *cie_cfi_row;
186 static GTY(()) reg_saved_in_data *cie_return_save;
188 static GTY(()) unsigned long dwarf2out_cfi_label_num;
190 /* The insn after which a new CFI note should be emitted. */
191 static rtx_insn *add_cfi_insn;
193 /* When non-null, add_cfi will add the CFI to this vector. */
194 static cfi_vec *add_cfi_vec;
196 /* The current instruction trace. */
197 static dw_trace_info *cur_trace;
199 /* The current, i.e. most recently generated, row of the CFI table. */
200 static dw_cfi_row *cur_row;
202 /* A copy of the current CFA, for use during the processing of a
203 single insn. */
204 static dw_cfa_location *cur_cfa;
206 /* We delay emitting a register save until either (a) we reach the end
207 of the prologue or (b) the register is clobbered. This clusters
208 register saves so that there are fewer pc advances. */
210 struct queued_reg_save {
211 rtx reg;
212 rtx saved_reg;
213 poly_int64_pod cfa_offset;
217 static vec<queued_reg_save> queued_reg_saves;
219 /* True if any CFI directives were emitted at the current insn. */
220 static bool any_cfis_emitted;
222 /* Short-hand for commonly used register numbers. */
223 static unsigned dw_stack_pointer_regnum;
224 static unsigned dw_frame_pointer_regnum;
226 /* Hook used by __throw. */
229 expand_builtin_dwarf_sp_column (void)
231 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
232 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
235 /* MEM is a memory reference for the register size table, each element of
236 which has mode MODE. Initialize column C as a return address column. */
238 static void
239 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
241 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
242 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
243 emit_move_insn (adjust_address (mem, mode, offset),
244 gen_int_mode (size, mode));
247 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
248 init_one_dwarf_reg_size to communicate on what has been done by the
249 latter. */
251 struct init_one_dwarf_reg_state
253 /* Whether the dwarf return column was initialized. */
254 bool wrote_return_column;
256 /* For each hard register REGNO, whether init_one_dwarf_reg_size
257 was given REGNO to process already. */
258 bool processed_regno [FIRST_PSEUDO_REGISTER];
262 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
263 initialize the dwarf register size table entry corresponding to register
264 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
265 use for the size entry to initialize, and INIT_STATE is the communication
266 datastructure conveying what we're doing to our caller. */
268 static
269 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
270 rtx table, machine_mode slotmode,
271 init_one_dwarf_reg_state *init_state)
273 const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
274 const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
275 const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
277 poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
278 poly_int64 regsize = GET_MODE_SIZE (regmode);
280 init_state->processed_regno[regno] = true;
282 if (rnum >= DWARF_FRAME_REGISTERS)
283 return;
285 if (dnum == DWARF_FRAME_RETURN_COLUMN)
287 if (regmode == VOIDmode)
288 return;
289 init_state->wrote_return_column = true;
292 /* ??? When is this true? Should it be a test based on DCOL instead? */
293 if (maybe_lt (slotoffset, 0))
294 return;
296 emit_move_insn (adjust_address (table, slotmode, slotoffset),
297 gen_int_mode (regsize, slotmode));
300 /* Generate code to initialize the dwarf register size table located
301 at the provided ADDRESS. */
303 void
304 expand_builtin_init_dwarf_reg_sizes (tree address)
306 unsigned int i;
307 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
308 rtx addr = expand_normal (address);
309 rtx mem = gen_rtx_MEM (BLKmode, addr);
311 init_one_dwarf_reg_state init_state;
313 memset ((char *)&init_state, 0, sizeof (init_state));
315 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
317 machine_mode save_mode;
318 rtx span;
320 /* No point in processing a register multiple times. This could happen
321 with register spans, e.g. when a reg is first processed as a piece of
322 a span, then as a register on its own later on. */
324 if (init_state.processed_regno[i])
325 continue;
327 save_mode = targetm.dwarf_frame_reg_mode (i);
328 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
330 if (!span)
331 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
332 else
334 for (int si = 0; si < XVECLEN (span, 0); si++)
336 rtx reg = XVECEXP (span, 0, si);
338 init_one_dwarf_reg_size
339 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
344 if (!init_state.wrote_return_column)
345 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
347 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
348 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
349 #endif
351 targetm.init_dwarf_reg_sizes_extra (address);
355 static dw_trace_info *
356 get_trace_info (rtx_insn *insn)
358 dw_trace_info dummy;
359 dummy.head = insn;
360 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
363 static bool
364 save_point_p (rtx_insn *insn)
366 /* Labels, except those that are really jump tables. */
367 if (LABEL_P (insn))
368 return inside_basic_block_p (insn);
370 /* We split traces at the prologue/epilogue notes because those
371 are points at which the unwind info is usually stable. This
372 makes it easier to find spots with identical unwind info so
373 that we can use remember/restore_state opcodes. */
374 if (NOTE_P (insn))
375 switch (NOTE_KIND (insn))
377 case NOTE_INSN_PROLOGUE_END:
378 case NOTE_INSN_EPILOGUE_BEG:
379 return true;
382 return false;
385 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
387 static inline HOST_WIDE_INT
388 div_data_align (HOST_WIDE_INT off)
390 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
391 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
392 return r;
395 /* Return true if we need a signed version of a given opcode
396 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
398 static inline bool
399 need_data_align_sf_opcode (HOST_WIDE_INT off)
401 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
404 /* Return a pointer to a newly allocated Call Frame Instruction. */
406 static inline dw_cfi_ref
407 new_cfi (void)
409 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
411 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
412 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
414 return cfi;
417 /* Return a newly allocated CFI row, with no defined data. */
419 static dw_cfi_row *
420 new_cfi_row (void)
422 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
424 row->cfa.reg = INVALID_REGNUM;
426 return row;
429 /* Return a copy of an existing CFI row. */
431 static dw_cfi_row *
432 copy_cfi_row (dw_cfi_row *src)
434 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
436 *dst = *src;
437 dst->reg_save = vec_safe_copy (src->reg_save);
439 return dst;
442 /* Return a copy of an existing CFA location. */
444 static dw_cfa_location *
445 copy_cfa (dw_cfa_location *src)
447 dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
448 *dst = *src;
449 return dst;
452 /* Generate a new label for the CFI info to refer to. */
454 static char *
455 dwarf2out_cfi_label (void)
457 int num = dwarf2out_cfi_label_num++;
458 char label[20];
460 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
462 return xstrdup (label);
465 /* Add CFI either to the current insn stream or to a vector, or both. */
467 static void
468 add_cfi (dw_cfi_ref cfi)
470 any_cfis_emitted = true;
472 if (add_cfi_insn != NULL)
474 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
475 NOTE_CFI (add_cfi_insn) = cfi;
478 if (add_cfi_vec != NULL)
479 vec_safe_push (*add_cfi_vec, cfi);
482 static void
483 add_cfi_args_size (poly_int64 size)
485 /* We don't yet have a representation for polynomial sizes. */
486 HOST_WIDE_INT const_size = size.to_constant ();
488 dw_cfi_ref cfi = new_cfi ();
490 /* While we can occasionally have args_size < 0 internally, this state
491 should not persist at a point we actually need an opcode. */
492 gcc_assert (const_size >= 0);
494 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
495 cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;
497 add_cfi (cfi);
500 static void
501 add_cfi_restore (unsigned reg)
503 dw_cfi_ref cfi = new_cfi ();
505 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
506 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
508 add_cfi (cfi);
511 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
512 that the register column is no longer saved. */
514 static void
515 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
517 if (vec_safe_length (row->reg_save) <= column)
518 vec_safe_grow_cleared (row->reg_save, column + 1);
519 (*row->reg_save)[column] = cfi;
522 /* This function fills in aa dw_cfa_location structure from a dwarf location
523 descriptor sequence. */
525 static void
526 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
528 struct dw_loc_descr_node *ptr;
529 cfa->offset = 0;
530 cfa->base_offset = 0;
531 cfa->indirect = 0;
532 cfa->reg = -1;
534 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
536 enum dwarf_location_atom op = ptr->dw_loc_opc;
538 switch (op)
540 case DW_OP_reg0:
541 case DW_OP_reg1:
542 case DW_OP_reg2:
543 case DW_OP_reg3:
544 case DW_OP_reg4:
545 case DW_OP_reg5:
546 case DW_OP_reg6:
547 case DW_OP_reg7:
548 case DW_OP_reg8:
549 case DW_OP_reg9:
550 case DW_OP_reg10:
551 case DW_OP_reg11:
552 case DW_OP_reg12:
553 case DW_OP_reg13:
554 case DW_OP_reg14:
555 case DW_OP_reg15:
556 case DW_OP_reg16:
557 case DW_OP_reg17:
558 case DW_OP_reg18:
559 case DW_OP_reg19:
560 case DW_OP_reg20:
561 case DW_OP_reg21:
562 case DW_OP_reg22:
563 case DW_OP_reg23:
564 case DW_OP_reg24:
565 case DW_OP_reg25:
566 case DW_OP_reg26:
567 case DW_OP_reg27:
568 case DW_OP_reg28:
569 case DW_OP_reg29:
570 case DW_OP_reg30:
571 case DW_OP_reg31:
572 cfa->reg = op - DW_OP_reg0;
573 break;
574 case DW_OP_regx:
575 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
576 break;
577 case DW_OP_breg0:
578 case DW_OP_breg1:
579 case DW_OP_breg2:
580 case DW_OP_breg3:
581 case DW_OP_breg4:
582 case DW_OP_breg5:
583 case DW_OP_breg6:
584 case DW_OP_breg7:
585 case DW_OP_breg8:
586 case DW_OP_breg9:
587 case DW_OP_breg10:
588 case DW_OP_breg11:
589 case DW_OP_breg12:
590 case DW_OP_breg13:
591 case DW_OP_breg14:
592 case DW_OP_breg15:
593 case DW_OP_breg16:
594 case DW_OP_breg17:
595 case DW_OP_breg18:
596 case DW_OP_breg19:
597 case DW_OP_breg20:
598 case DW_OP_breg21:
599 case DW_OP_breg22:
600 case DW_OP_breg23:
601 case DW_OP_breg24:
602 case DW_OP_breg25:
603 case DW_OP_breg26:
604 case DW_OP_breg27:
605 case DW_OP_breg28:
606 case DW_OP_breg29:
607 case DW_OP_breg30:
608 case DW_OP_breg31:
609 cfa->reg = op - DW_OP_breg0;
610 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
611 break;
612 case DW_OP_bregx:
613 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
614 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
615 break;
616 case DW_OP_deref:
617 cfa->indirect = 1;
618 break;
619 case DW_OP_plus_uconst:
620 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
621 break;
622 default:
623 gcc_unreachable ();
628 /* Find the previous value for the CFA, iteratively. CFI is the opcode
629 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
630 one level of remember/restore state processing. */
632 void
633 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
635 switch (cfi->dw_cfi_opc)
637 case DW_CFA_def_cfa_offset:
638 case DW_CFA_def_cfa_offset_sf:
639 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
640 break;
641 case DW_CFA_def_cfa_register:
642 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
643 break;
644 case DW_CFA_def_cfa:
645 case DW_CFA_def_cfa_sf:
646 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
647 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
648 break;
649 case DW_CFA_def_cfa_expression:
650 if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
651 *loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
652 else
653 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
654 break;
656 case DW_CFA_remember_state:
657 gcc_assert (!remember->in_use);
658 *remember = *loc;
659 remember->in_use = 1;
660 break;
661 case DW_CFA_restore_state:
662 gcc_assert (remember->in_use);
663 *loc = *remember;
664 remember->in_use = 0;
665 break;
667 default:
668 break;
672 /* Determine if two dw_cfa_location structures define the same data. */
674 bool
675 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
677 return (loc1->reg == loc2->reg
678 && known_eq (loc1->offset, loc2->offset)
679 && loc1->indirect == loc2->indirect
680 && (loc1->indirect == 0
681 || known_eq (loc1->base_offset, loc2->base_offset)));
684 /* Determine if two CFI operands are identical. */
686 static bool
687 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
689 switch (t)
691 case dw_cfi_oprnd_unused:
692 return true;
693 case dw_cfi_oprnd_reg_num:
694 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
695 case dw_cfi_oprnd_offset:
696 return a->dw_cfi_offset == b->dw_cfi_offset;
697 case dw_cfi_oprnd_addr:
698 return (a->dw_cfi_addr == b->dw_cfi_addr
699 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
700 case dw_cfi_oprnd_loc:
701 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
702 case dw_cfi_oprnd_cfa_loc:
703 return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
705 gcc_unreachable ();
708 /* Determine if two CFI entries are identical. */
710 static bool
711 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
713 enum dwarf_call_frame_info opc;
715 /* Make things easier for our callers, including missing operands. */
716 if (a == b)
717 return true;
718 if (a == NULL || b == NULL)
719 return false;
721 /* Obviously, the opcodes must match. */
722 opc = a->dw_cfi_opc;
723 if (opc != b->dw_cfi_opc)
724 return false;
726 /* Compare the two operands, re-using the type of the operands as
727 already exposed elsewhere. */
728 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
729 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
730 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
731 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
734 /* Determine if two CFI_ROW structures are identical. */
736 static bool
737 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
739 size_t i, n_a, n_b, n_max;
741 if (a->cfa_cfi)
743 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
744 return false;
746 else if (!cfa_equal_p (&a->cfa, &b->cfa))
747 return false;
749 n_a = vec_safe_length (a->reg_save);
750 n_b = vec_safe_length (b->reg_save);
751 n_max = MAX (n_a, n_b);
753 for (i = 0; i < n_max; ++i)
755 dw_cfi_ref r_a = NULL, r_b = NULL;
757 if (i < n_a)
758 r_a = (*a->reg_save)[i];
759 if (i < n_b)
760 r_b = (*b->reg_save)[i];
762 if (!cfi_equal_p (r_a, r_b))
763 return false;
766 return true;
769 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
770 what opcode to emit. Returns the CFI opcode to effect the change, or
771 NULL if NEW_CFA == OLD_CFA. */
773 static dw_cfi_ref
774 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
776 dw_cfi_ref cfi;
778 /* If nothing changed, no need to issue any call frame instructions. */
779 if (cfa_equal_p (old_cfa, new_cfa))
780 return NULL;
782 cfi = new_cfi ();
784 HOST_WIDE_INT const_offset;
785 if (new_cfa->reg == old_cfa->reg
786 && !new_cfa->indirect
787 && !old_cfa->indirect
788 && new_cfa->offset.is_constant (&const_offset))
790 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
791 the CFA register did not change but the offset did. The data
792 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
793 in the assembler via the .cfi_def_cfa_offset directive. */
794 if (const_offset < 0)
795 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
796 else
797 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
798 cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
800 else if (new_cfa->offset.is_constant ()
801 && known_eq (new_cfa->offset, old_cfa->offset)
802 && old_cfa->reg != INVALID_REGNUM
803 && !new_cfa->indirect
804 && !old_cfa->indirect)
806 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
807 indicating the CFA register has changed to <register> but the
808 offset has not changed. This requires the old CFA to have
809 been set as a register plus offset rather than a general
810 DW_CFA_def_cfa_expression. */
811 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
812 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
814 else if (new_cfa->indirect == 0
815 && new_cfa->offset.is_constant (&const_offset))
817 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
818 indicating the CFA register has changed to <register> with
819 the specified offset. The data factoring for DW_CFA_def_cfa_sf
820 happens in output_cfi, or in the assembler via the .cfi_def_cfa
821 directive. */
822 if (const_offset < 0)
823 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
824 else
825 cfi->dw_cfi_opc = DW_CFA_def_cfa;
826 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
827 cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
829 else
831 /* Construct a DW_CFA_def_cfa_expression instruction to
832 calculate the CFA using a full location expression since no
833 register-offset pair is available. */
834 struct dw_loc_descr_node *loc_list;
836 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
837 loc_list = build_cfa_loc (new_cfa, 0);
838 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
839 if (!new_cfa->offset.is_constant ()
840 || !new_cfa->base_offset.is_constant ())
841 /* It's hard to reconstruct the CFA location for a polynomial
842 expression, so just cache it instead. */
843 cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
844 else
845 cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
848 return cfi;
851 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
853 static void
854 def_cfa_1 (dw_cfa_location *new_cfa)
856 dw_cfi_ref cfi;
858 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
859 cur_trace->cfa_store.offset = new_cfa->offset;
861 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
862 if (cfi)
864 cur_row->cfa = *new_cfa;
865 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
866 ? cfi : NULL);
868 add_cfi (cfi);
872 /* Add the CFI for saving a register. REG is the CFA column number.
873 If SREG is -1, the register is saved at OFFSET from the CFA;
874 otherwise it is saved in SREG. */
876 static void
877 reg_save (unsigned int reg, unsigned int sreg, poly_int64 offset)
879 dw_fde_ref fde = cfun ? cfun->fde : NULL;
880 dw_cfi_ref cfi = new_cfi ();
882 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
884 if (sreg == INVALID_REGNUM)
886 HOST_WIDE_INT const_offset;
887 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
888 if (fde && fde->stack_realign)
890 cfi->dw_cfi_opc = DW_CFA_expression;
891 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
892 cfi->dw_cfi_oprnd2.dw_cfi_loc
893 = build_cfa_aligned_loc (&cur_row->cfa, offset,
894 fde->stack_realignment);
896 else if (offset.is_constant (&const_offset))
898 if (need_data_align_sf_opcode (const_offset))
899 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
900 else if (reg & ~0x3f)
901 cfi->dw_cfi_opc = DW_CFA_offset_extended;
902 else
903 cfi->dw_cfi_opc = DW_CFA_offset;
904 cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
906 else
908 cfi->dw_cfi_opc = DW_CFA_expression;
909 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
910 cfi->dw_cfi_oprnd2.dw_cfi_loc
911 = build_cfa_loc (&cur_row->cfa, offset);
914 else if (sreg == reg)
916 /* While we could emit something like DW_CFA_same_value or
917 DW_CFA_restore, we never expect to see something like that
918 in a prologue. This is more likely to be a bug. A backend
919 can always bypass this by using REG_CFA_RESTORE directly. */
920 gcc_unreachable ();
922 else
924 cfi->dw_cfi_opc = DW_CFA_register;
925 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
928 add_cfi (cfi);
929 update_row_reg_save (cur_row, reg, cfi);
932 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
933 and adjust data structures to match. */
935 static void
936 notice_args_size (rtx_insn *insn)
938 poly_int64 args_size, delta;
939 rtx note;
941 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
942 if (note == NULL)
943 return;
945 args_size = get_args_size (note);
946 delta = args_size - cur_trace->end_true_args_size;
947 if (known_eq (delta, 0))
948 return;
950 cur_trace->end_true_args_size = args_size;
952 /* If the CFA is computed off the stack pointer, then we must adjust
953 the computation of the CFA as well. */
954 if (cur_cfa->reg == dw_stack_pointer_regnum)
956 gcc_assert (!cur_cfa->indirect);
958 /* Convert a change in args_size (always a positive in the
959 direction of stack growth) to a change in stack pointer. */
960 if (!STACK_GROWS_DOWNWARD)
961 delta = -delta;
963 cur_cfa->offset += delta;
967 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
968 data within the trace related to EH insns and args_size. */
970 static void
971 notice_eh_throw (rtx_insn *insn)
973 poly_int64 args_size = cur_trace->end_true_args_size;
974 if (cur_trace->eh_head == NULL)
976 cur_trace->eh_head = insn;
977 cur_trace->beg_delay_args_size = args_size;
978 cur_trace->end_delay_args_size = args_size;
980 else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
982 cur_trace->end_delay_args_size = args_size;
984 /* ??? If the CFA is the stack pointer, search backward for the last
985 CFI note and insert there. Given that the stack changed for the
986 args_size change, there *must* be such a note in between here and
987 the last eh insn. */
988 add_cfi_args_size (args_size);
992 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
993 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
994 used in places where rtl is prohibited. */
996 static inline unsigned
997 dwf_regno (const_rtx reg)
999 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
1000 return DWARF_FRAME_REGNUM (REGNO (reg));
1003 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1005 static bool
1006 compare_reg_or_pc (rtx x, rtx y)
1008 if (REG_P (x) && REG_P (y))
1009 return REGNO (x) == REGNO (y);
1010 return x == y;
1013 /* Record SRC as being saved in DEST. DEST may be null to delete an
1014 existing entry. SRC may be a register or PC_RTX. */
1016 static void
1017 record_reg_saved_in_reg (rtx dest, rtx src)
1019 reg_saved_in_data *elt;
1020 size_t i;
1022 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1023 if (compare_reg_or_pc (elt->orig_reg, src))
1025 if (dest == NULL)
1026 cur_trace->regs_saved_in_regs.unordered_remove (i);
1027 else
1028 elt->saved_in_reg = dest;
1029 return;
1032 if (dest == NULL)
1033 return;
1035 reg_saved_in_data e = {src, dest};
1036 cur_trace->regs_saved_in_regs.safe_push (e);
1039 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1040 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1042 static void
1043 queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
1045 queued_reg_save *q;
1046 queued_reg_save e = {reg, sreg, offset};
1047 size_t i;
1049 /* Duplicates waste space, but it's also necessary to remove them
1050 for correctness, since the queue gets output in reverse order. */
1051 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1052 if (compare_reg_or_pc (q->reg, reg))
1054 *q = e;
1055 return;
1058 queued_reg_saves.safe_push (e);
1061 /* Output all the entries in QUEUED_REG_SAVES. */
1063 static void
1064 dwarf2out_flush_queued_reg_saves (void)
1066 queued_reg_save *q;
1067 size_t i;
1069 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1071 unsigned int reg, sreg;
1073 record_reg_saved_in_reg (q->saved_reg, q->reg);
1075 if (q->reg == pc_rtx)
1076 reg = DWARF_FRAME_RETURN_COLUMN;
1077 else
1078 reg = dwf_regno (q->reg);
1079 if (q->saved_reg)
1080 sreg = dwf_regno (q->saved_reg);
1081 else
1082 sreg = INVALID_REGNUM;
1083 reg_save (reg, sreg, q->cfa_offset);
1086 queued_reg_saves.truncate (0);
1089 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1090 location for? Or, does it clobber a register which we've previously
1091 said that some other register is saved in, and for which we now
1092 have a new location for? */
1094 static bool
1095 clobbers_queued_reg_save (const_rtx insn)
1097 queued_reg_save *q;
1098 size_t iq;
1100 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1102 size_t ir;
1103 reg_saved_in_data *rir;
1105 if (modified_in_p (q->reg, insn))
1106 return true;
1108 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1109 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1110 && modified_in_p (rir->saved_in_reg, insn))
1111 return true;
1114 return false;
1117 /* What register, if any, is currently saved in REG? */
1119 static rtx
1120 reg_saved_in (rtx reg)
1122 unsigned int regn = REGNO (reg);
1123 queued_reg_save *q;
1124 reg_saved_in_data *rir;
1125 size_t i;
1127 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1128 if (q->saved_reg && regn == REGNO (q->saved_reg))
1129 return q->reg;
1131 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1132 if (regn == REGNO (rir->saved_in_reg))
1133 return rir->orig_reg;
1135 return NULL_RTX;
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* The note supplies the CFA in full; start from a clean description.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel a constant offset off PAT, leaving the base expression.  */
  pat = strip_offset (pat, &cur_cfa->offset);
  if (MEM_P (pat))
    {
      /* The CFA is stored in memory; record the extra dereference and
	 strip the offset of the address too.  */
      cur_cfa->indirect = 1;
      pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* The addend must be based on the current CFA register; fold the
	 constant into the tracked offset.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
      break;

    case REG:
      /* A plain register copy: only the CFA register changes, below.  */
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  /* ADJUST_CFA notes never apply to an indirect (in-memory) CFA.  */
  gcc_assert (cur_cfa->indirect == 0);
}
1186 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1188 static void
1189 dwarf2out_frame_debug_cfa_offset (rtx set)
1191 poly_int64 offset;
1192 rtx src, addr, span;
1193 unsigned int sregno;
1195 src = XEXP (set, 1);
1196 addr = XEXP (set, 0);
1197 gcc_assert (MEM_P (addr));
1198 addr = XEXP (addr, 0);
1200 /* As documented, only consider extremely simple addresses. */
1201 switch (GET_CODE (addr))
1203 case REG:
1204 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1205 offset = -cur_cfa->offset;
1206 break;
1207 case PLUS:
1208 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1209 offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
1210 break;
1211 default:
1212 gcc_unreachable ();
1215 if (src == pc_rtx)
1217 span = NULL;
1218 sregno = DWARF_FRAME_RETURN_COLUMN;
1220 else
1222 span = targetm.dwarf_register_span (src);
1223 sregno = dwf_regno (src);
1226 /* ??? We'd like to use queue_reg_save, but we need to come up with
1227 a different flushing heuristic for epilogues. */
1228 if (!span)
1229 reg_save (sregno, INVALID_REGNUM, offset);
1230 else
1232 /* We have a PARALLEL describing where the contents of SRC live.
1233 Adjust the offset for each piece of the PARALLEL. */
1234 poly_int64 span_offset = offset;
1236 gcc_assert (GET_CODE (span) == PARALLEL);
1238 const int par_len = XVECLEN (span, 0);
1239 for (int par_index = 0; par_index < par_len; par_index++)
1241 rtx elem = XVECEXP (span, 0, par_index);
1242 sregno = dwf_regno (src);
1243 reg_save (sregno, INVALID_REGNUM, span_offset);
1244 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1249 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1251 static void
1252 dwarf2out_frame_debug_cfa_register (rtx set)
1254 rtx src, dest;
1255 unsigned sregno, dregno;
1257 src = XEXP (set, 1);
1258 dest = XEXP (set, 0);
1260 record_reg_saved_in_reg (dest, src);
1261 if (src == pc_rtx)
1262 sregno = DWARF_FRAME_RETURN_COLUMN;
1263 else
1264 sregno = dwf_regno (src);
1266 dregno = dwf_regno (dest);
1268 /* ??? We'd like to use queue_reg_save, but we need to come up with
1269 a different flushing heuristic for epilogues. */
1270 reg_save (sregno, dregno, 0);
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-part register spans are not supported here.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Emit DW_CFA_expression: SRC is saved at the location computed from
     DEST's address expression.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
   note.  */

static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  /* Multi-part register spans are not supported here.  */
  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  /* Emit DW_CFA_val_expression: DEST's saved *value* (not its save
     slot) is computed by the expression SRC.  */
  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */

static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  gcc_assert (REG_P (reg));

  rtx span = targetm.dwarf_register_span (reg);
  if (!span)
    {
      /* Simple case: restore the single register and drop its row entry.  */
      unsigned int regno = dwf_regno (reg);
      add_cfi_restore (regno);
      update_row_reg_save (cur_row, regno, NULL);
    }
  else
    {
      /* We have a PARALLEL describing where the contents of REG live.
	 Restore the register for each piece of the PARALLEL.  */
      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
	{
	  reg = XVECEXP (span, 0, par_index);
	  gcc_assert (REG_P (reg));
	  unsigned int regno = dwf_regno (reg);
	  add_cfi_restore (regno);
	  update_row_reg_save (cur_row, regno, NULL);
	}
    }
}
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  /* The opcode takes no operands.  The caller also reuses this routine
     for REG_CFA_TOGGLE_RA_MANGLE, which is overloaded onto the same
     DWARF opcode.  */
  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1373 /* Record call frame debugging information for an expression EXPR,
1374 which either sets SP or FP (adjusting how we calculate the frame
1375 address) or saves a register to the stack or another register.
1376 LABEL indicates the address of EXPR.
1378 This function encodes a state machine mapping rtxes to actions on
1379 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1380 users need not read the source code.
1382 The High-Level Picture
1384 Changes in the register we use to calculate the CFA: Currently we
1385 assume that if you copy the CFA register into another register, we
1386 should take the other one as the new CFA register; this seems to
1387 work pretty well. If it's wrong for some target, it's simple
1388 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1390 Changes in the register we use for saving registers to the stack:
1391 This is usually SP, but not always. Again, we deduce that if you
1392 copy SP into another register (and SP is not the CFA register),
1393 then the new register is the one we will be using for register
1394 saves. This also seems to work.
1396 Register saves: There's not much guesswork about this one; if
1397 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1398 register save, and the register used to calculate the destination
1399 had better be the one we think we're using for this purpose.
1400 It's also assumed that a copy from a call-saved register to another
1401 register is saving that register if RTX_FRAME_RELATED_P is set on
1402 that instruction. If the copy is from a call-saved register to
1403 the *same* register, that means that the register is now the same
1404 value as in the caller.
1406 Except: If the register being saved is the CFA register, and the
1407 offset is nonzero, we are saving the CFA, so we assume we have to
1408 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1409 the intent is to save the value of SP from the previous frame.
1411 In addition, if a register has previously been saved to a different
1412 register,
1414 Invariants / Summaries of Rules
1416 cfa current rule for calculating the CFA. It usually
1417 consists of a register and an offset. This is
1418 actually stored in *cur_cfa, but abbreviated
1419 for the purposes of this documentation.
1420 cfa_store register used by prologue code to save things to the stack
1421 cfa_store.offset is the offset from the value of
1422 cfa_store.reg to the actual CFA
1423 cfa_temp register holding an integral value. cfa_temp.offset
1424 stores the value, which will be used to adjust the
1425 stack pointer. cfa_temp is also used like cfa_store,
1426 to track stores to the stack via fp or a temp reg.
1428 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1429 with cfa.reg as the first operand changes the cfa.reg and its
1430 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1431 cfa_temp.offset.
1433 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1434 expression yielding a constant. This sets cfa_temp.reg
1435 and cfa_temp.offset.
1437 Rule 5: Create a new register cfa_store used to save items to the
1438 stack.
1440 Rules 10-14: Save a register to the stack. Define offset as the
1441 difference of the original location and cfa_store's
1442 location (or cfa_temp's location if cfa_temp is used).
1444 Rules 16-20: If AND operation happens on sp in prologue, we assume
1445 stack is realigned. We will use a group of DW_OP_XXX
1446 expressions to represent the location of the stored
1447 register instead of CFA+offset.
1449 The Rules
1451 "{a,b}" indicates a choice of a xor b.
1452 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1454 Rule 1:
1455 (set <reg1> <reg2>:cfa.reg)
1456 effects: cfa.reg = <reg1>
1457 cfa.offset unchanged
1458 cfa_temp.reg = <reg1>
1459 cfa_temp.offset = cfa.offset
1461 Rule 2:
1462 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1463 {<const_int>,<reg>:cfa_temp.reg}))
1464 effects: cfa.reg = sp if fp used
1465 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1466 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1467 if cfa_store.reg==sp
1469 Rule 3:
1470 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1471 effects: cfa.reg = fp
1472 cfa_offset += +/- <const_int>
1474 Rule 4:
1475 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1476 constraints: <reg1> != fp
1477 <reg1> != sp
1478 effects: cfa.reg = <reg1>
1479 cfa_temp.reg = <reg1>
1480 cfa_temp.offset = cfa.offset
1482 Rule 5:
1483 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1484 constraints: <reg1> != fp
1485 <reg1> != sp
1486 effects: cfa_store.reg = <reg1>
1487 cfa_store.offset = cfa.offset - cfa_temp.offset
1489 Rule 6:
1490 (set <reg> <const_int>)
1491 effects: cfa_temp.reg = <reg>
1492 cfa_temp.offset = <const_int>
1494 Rule 7:
1495 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1496 effects: cfa_temp.reg = <reg1>
1497 cfa_temp.offset |= <const_int>
1499 Rule 8:
1500 (set <reg> (high <exp>))
1501 effects: none
1503 Rule 9:
1504 (set <reg> (lo_sum <exp> <const_int>))
1505 effects: cfa_temp.reg = <reg>
1506 cfa_temp.offset = <const_int>
1508 Rule 10:
1509 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1510 effects: cfa_store.offset -= <const_int>
1511 cfa.offset = cfa_store.offset if cfa.reg == sp
1512 cfa.reg = sp
1513 cfa.base_offset = -cfa_store.offset
1515 Rule 11:
1516 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1517 effects: cfa_store.offset += -/+ mode_size(mem)
1518 cfa.offset = cfa_store.offset if cfa.reg == sp
1519 cfa.reg = sp
1520 cfa.base_offset = -cfa_store.offset
1522 Rule 12:
1523 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1525 <reg2>)
1526 effects: cfa.reg = <reg1>
1527 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1529 Rule 13:
1530 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1531 effects: cfa.reg = <reg1>
1532 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1534 Rule 14:
1535 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1536 effects: cfa.reg = <reg1>
1537 cfa.base_offset = -cfa_temp.offset
1538 cfa_temp.offset -= mode_size(mem)
1540 Rule 15:
1541 (set <reg> {unspec, unspec_volatile})
1542 effects: target-dependent
1544 Rule 16:
1545 (set sp (and: sp <const_int>))
1546 constraints: cfa_store.reg == sp
1547 effects: cfun->fde.stack_realign = 1
1548 cfa_store.offset = 0
1549 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1551 Rule 17:
1552 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1553 effects: cfa_store.offset += -/+ mode_size(mem)
1555 Rule 18:
1556 (set (mem ({pre_inc, pre_dec} sp)) fp)
1557 constraints: fde->stack_realign == 1
1558 effects: cfa_store.offset = 0
1559 cfa.reg != HARD_FRAME_POINTER_REGNUM
1561 Rule 19:
1562 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1563 constraints: fde->stack_realign == 1
1564 && cfa.offset == 0
1565 && cfa.indirect == 0
1566 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1567 effects: Use DW_CFA_def_cfa_expression to define cfa
1568 cfa.reg == fde->drap_reg */
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  poly_int64 offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* Resolve SRC to the register it was previously saved in, if any.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      if (REG_P (XEXP (src, 1)))
		{
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		}
	      else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
		gcc_unreachable ();

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
	      offset = rtx_to_poly_int64 (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && poly_int_rtx_p (XEXP (src, 1), &offset))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = -offset;
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && poly_int_rtx_p (XEXP (src, 1),
					  &cur_trace->cfa_temp.offset))
		cur_trace->cfa_temp.reg = dwf_regno (dest);
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	case POLY_INT_CST:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
			  &cur_trace->cfa_temp.offset))
	    /* The target shouldn't generate this kind of CFI note if we
	       can't represent it.  */
	    gcc_unreachable ();
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* For POST_DEC the store address is the pre-decrement SP, so
	     the mode size is folded into the final offset.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (known_eq (cur_cfa->offset, 0))
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  poly_int64 span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Record call frame debugging information for INSN, which either sets
   SP or FP (adjusting how we calculate the frame address) or saves a
   register to the stack.  REG_CFA_* notes take precedence over pattern
   interpretation; only if no note handled the insn do we fall back to
   dwarf2out_frame_debug_expr on its pattern.  */

static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	pat = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	/* A null note operand means "use the insn's own pattern".  */
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
      case REG_CFA_VAL_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);

	if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
	  dwarf2out_frame_debug_cfa_expression (n);
	else
	  dwarf2out_frame_debug_cfa_val_expression (n);

	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    /* The restored register is the SET_DEST of the first set.  */
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_TOGGLE_RA_MANGLE:
      case REG_CFA_WINDOW_SAVE:
	/* We overload both of these operations onto the same DWARF opcode.  */
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere.  */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again.  A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (pat))
	dwarf2out_flush_queued_reg_saves ();
    }
}
/* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */

static void
change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
{
  size_t i, n_old, n_new, n_max;
  dw_cfi_ref cfi;

  /* First reconcile the CFA rule itself.  */
  if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
    add_cfi (new_row->cfa_cfi);
  else
    {
      cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
      if (cfi)
	add_cfi (cfi);
    }

  /* Then walk the union of both rows' register-save entries; index I
     doubles as the DWARF register number for add_cfi_restore.  */
  n_old = vec_safe_length (old_row->reg_save);
  n_new = vec_safe_length (new_row->reg_save);
  n_max = MAX (n_old, n_new);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_old = NULL, r_new = NULL;

      if (i < n_old)
	r_old = (*old_row->reg_save)[i];
      if (i < n_new)
	r_new = (*new_row->reg_save)[i];

      if (r_old == r_new)
	/* Identical entries (including both NULL): nothing to emit.  */
	;
      else if (r_new == NULL)
	add_cfi_restore (i);
      else if (!cfi_equal_p (r_old, r_new))
	add_cfi (r_new);
    }
}
2198 /* Examine CFI and return true if a cfi label and set_loc is needed
2199 beforehand. Even when generating CFI assembler instructions, we
2200 still have to add the cfi to the list so that lookup_cfa_1 works
2201 later on. When -g2 and above we even need to force emitting of
2202 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2203 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2204 and so don't use convert_cfa_to_fb_loc_list. */
2206 static bool
2207 cfi_label_required_p (dw_cfi_ref cfi)
2209 if (!dwarf2out_do_cfi_asm ())
2210 return true;
2212 if (dwarf_version == 2
2213 && debug_info_level > DINFO_LEVEL_TERSE
2214 && (write_symbols == DWARF2_DEBUG
2215 || write_symbols == VMS_AND_DWARF2_DEBUG))
2217 switch (cfi->dw_cfi_opc)
2219 case DW_CFA_def_cfa_offset:
2220 case DW_CFA_def_cfa_offset_sf:
2221 case DW_CFA_def_cfa_register:
2222 case DW_CFA_def_cfa:
2223 case DW_CFA_def_cfa_sf:
2224 case DW_CFA_def_cfa_expression:
2225 case DW_CFA_restore_state:
2226 return true;
2227 default:
2228 return false;
2231 return false;
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Remember where the second text section's CFIs begin.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Advance NEXT past the whole run of consecutive CFI notes
	     (and inactive insns), OR-ing together their label needs, so
	     the run shares a single label.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push every CFI in the run onto the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
2292 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2294 /* If LABEL is the start of a trace, then initialize the state of that
2295 trace from CUR_TRACE and CUR_ROW. */
2297 static void
2298 maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
2300 dw_trace_info *ti;
2302 ti = get_trace_info (start);
2303 gcc_assert (ti != NULL);
2305 if (dump_file)
2307 fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
2308 cur_trace->id, ti->id,
2309 (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2310 (origin ? INSN_UID (origin) : 0));
/* The args_size in effect at the end of the current (source) trace;
   this is what the destination trace inherits along this edge.  */
2313 poly_int64 args_size = cur_trace->end_true_args_size;
2314 if (ti->beg_row == NULL)
2316 /* This is the first time we've encountered this trace. Propagate
2317 state across the edge and push the trace onto the work list. */
2318 ti->beg_row = copy_cfi_row (cur_row);
2319 ti->beg_true_args_size = args_size;
2321 ti->cfa_store = cur_trace->cfa_store;
2322 ti->cfa_temp = cur_trace->cfa_temp;
2323 ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
2325 trace_work_list.safe_push (ti);
2327 if (dump_file)
2328 fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2330 else
2333 /* We ought to have the same state incoming to a given trace no
2334 matter how we arrive at the trace. Anything else means we've
2335 got some kind of optimization error. */
2336 #if CHECKING_P
2337 if (!cfi_row_equal_p (cur_row, ti->beg_row))
2339 if (dump_file)
2341 fprintf (dump_file, "Inconsistent CFI state!\n");
2342 fprintf (dump_file, "SHOULD have:\n");
2343 dump_cfi_row (dump_file, ti->beg_row);
2344 fprintf (dump_file, "DO have:\n");
2345 dump_cfi_row (dump_file, cur_row);
2348 gcc_unreachable ();
2350 #endif
2352 /* The args_size is allowed to conflict if it isn't actually used. */
2353 if (maybe_ne (ti->beg_true_args_size, args_size))
2354 ti->args_size_undefined = true;
2358 /* Similarly, but handle the args_size and CFA reset across EH
2359 and non-local goto edges. */
2361 static void
2362 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2364 poly_int64 save_args_size, delta;
2365 dw_cfa_location save_cfa;
2367 save_args_size = cur_trace->end_true_args_size;
2368 if (known_eq (save_args_size, 0))
2370 maybe_record_trace_start (start, origin);
2371 return;
2374 delta = -save_args_size;
2375 cur_trace->end_true_args_size = 0;
2377 save_cfa = cur_row->cfa;
2378 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2380 /* Convert a change in args_size (always a positive in the
2381 direction of stack growth) to a change in stack pointer. */
2382 if (!STACK_GROWS_DOWNWARD)
2383 delta = -delta;
2385 cur_row->cfa.offset += delta;
2388 maybe_record_trace_start (start, origin);
2390 cur_trace->end_true_args_size = save_args_size;
2391 cur_row->cfa = save_cfa;
2394 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2395 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2397 static void
2398 create_trace_edges (rtx_insn *insn)
2400 rtx tmp;
2401 int i, n;
2403 if (JUMP_P (insn))
2405 rtx_jump_table_data *table;
/* Non-local gotos are handled via their handler labels at the
   CALL_P case below; no ordinary jump edge here.  */
2407 if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2408 return;
/* Tablejump: one edge per case label in the dispatch table.  */
2410 if (tablejump_p (insn, NULL, &table))
2412 rtvec vec = table->get_labels ();
2414 n = GET_NUM_ELEM (vec);
2415 for (i = 0; i < n; ++i)
2417 rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
2418 maybe_record_trace_start (lab, insn);
/* Computed jump: conservatively, any label whose address was taken.  */
2421 else if (computed_jump_p (insn))
2423 rtx_insn *temp;
2424 unsigned int i;
2425 FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
2426 maybe_record_trace_start (temp, insn);
/* A return jump exits the function; no intra-function edge.  */
2428 else if (returnjump_p (insn))
/* asm goto: one edge per label operand of the asm.  */
2430 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2432 n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2433 for (i = 0; i < n; ++i)
2435 rtx_insn *lab =
2436 as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
2437 maybe_record_trace_start (lab, insn);
/* Otherwise a plain (conditional) jump to a single label.  */
2440 else
2442 rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
2443 gcc_assert (lab != NULL);
2444 maybe_record_trace_start (lab, insn);
2447 else if (CALL_P (insn))
2449 /* Sibling calls don't have edges inside this function. */
2450 if (SIBLING_CALL_P (insn))
2451 return;
2453 /* Process non-local goto edges. */
2454 if (can_nonlocal_goto (insn))
2455 for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
2456 lab;
2457 lab = lab->next ())
2458 maybe_record_trace_start_abnormal (lab->insn (), insn);
/* A SEQUENCE (branch with delay slots): recurse on each element;
   the elements carry the edges, so stop here.  */
2460 else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2462 int i, n = seq->len ();
2463 for (i = 0; i < n; ++i)
2464 create_trace_edges (seq->insn (i));
2465 return;
2468 /* Process EH edges. */
2469 if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2471 eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2472 if (lp)
2473 maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2477 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2479 static void
2480 scan_insn_after (rtx_insn *insn)
2482 if (RTX_FRAME_RELATED_P (insn))
2483 dwarf2out_frame_debug (insn);
2484 notice_args_size (insn);
2487 /* Scan the trace beginning at INSN and create the CFI notes for the
2488 instructions therein. */
2490 static void
2491 scan_trace (dw_trace_info *trace, bool entry)
2493 rtx_insn *prev, *insn = trace->head;
2494 dw_cfa_location this_cfa;
2496 if (dump_file)
2497 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2498 trace->id, rtx_name[(int) GET_CODE (insn)],
2499 INSN_UID (insn));
/* The end-of-trace row state starts as a copy of the begin state and
   is mutated in place as the scan proceeds.  */
2501 trace->end_row = copy_cfi_row (trace->beg_row);
2502 trace->end_true_args_size = trace->beg_true_args_size;
2504 cur_trace = trace;
2505 cur_row = trace->end_row;
2507 this_cfa = cur_row->cfa;
2508 cur_cfa = &this_cfa;
2510 /* If the current function starts with a non-standard incoming frame
2511 sp offset, emit a note before the first instruction. */
2512 if (entry
2513 && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
2515 add_cfi_insn = insn;
2516 gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED);
2517 this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
2518 def_cfa_1 (&this_cfa);
2521 for (prev = insn, insn = NEXT_INSN (insn);
2522 insn;
2523 prev = insn, insn = NEXT_INSN (insn))
2525 rtx_insn *control;
2527 /* Do everything that happens "before" the insn. */
2528 add_cfi_insn = prev;
2530 /* Notice the end of a trace. */
2531 if (BARRIER_P (insn))
2533 /* Don't bother saving the unneeded queued registers at all. */
2534 queued_reg_saves.truncate (0);
2535 break;
2537 if (save_point_p (insn))
2539 /* Propagate across fallthru edges. */
2540 dwarf2out_flush_queued_reg_saves ();
2541 maybe_record_trace_start (insn, NULL);
2542 break;
2545 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2546 continue;
2548 /* Handle all changes to the row state. Sequences require special
2549 handling for the positioning of the notes. */
2550 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2552 rtx_insn *elt;
2553 int i, n = pat->len ();
/* Element 0 of a SEQUENCE is the branch/call; the rest are the
   delay-slot insns.  */
2555 control = pat->insn (0);
2556 if (can_throw_internal (control))
2557 notice_eh_throw (control);
2558 dwarf2out_flush_queued_reg_saves ();
2560 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2562 /* ??? Hopefully multiple delay slots are not annulled. */
2563 gcc_assert (n == 2);
2564 gcc_assert (!RTX_FRAME_RELATED_P (control));
2565 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2567 elt = pat->insn (1);
2569 if (INSN_FROM_TARGET_P (elt))
2571 cfi_vec save_row_reg_save;
2573 /* If ELT is an instruction from target of an annulled
2574 branch, the effects are for the target only and so
2575 the args_size and CFA along the current path
2576 shouldn't change. */
2577 add_cfi_insn = NULL;
2578 poly_int64 restore_args_size = cur_trace->end_true_args_size;
2579 cur_cfa = &cur_row->cfa;
2580 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2582 scan_insn_after (elt);
2584 /* ??? Should we instead save the entire row state? */
2585 gcc_assert (!queued_reg_saves.length ());
/* Record the branch-taken edge with ELT's effects applied...  */
2587 create_trace_edges (control);
/* ... then undo them for the fallthru path.  */
2589 cur_trace->end_true_args_size = restore_args_size;
2590 cur_row->cfa = this_cfa;
2591 cur_row->reg_save = save_row_reg_save;
2592 cur_cfa = &this_cfa;
2594 else
2596 /* If ELT is a annulled branch-taken instruction (i.e.
2597 executed only when branch is not taken), the args_size
2598 and CFA should not change through the jump. */
2599 create_trace_edges (control);
2601 /* Update and continue with the trace. */
2602 add_cfi_insn = insn;
2603 scan_insn_after (elt);
2604 def_cfa_1 (&this_cfa);
2606 continue;
2609 /* The insns in the delay slot should all be considered to happen
2610 "before" a call insn. Consider a call with a stack pointer
2611 adjustment in the delay slot. The backtrace from the callee
2612 should include the sp adjustment. Unfortunately, that leaves
2613 us with an unavoidable unwinding error exactly at the call insn
2614 itself. For jump insns we'd prefer to avoid this error by
2615 placing the notes after the sequence. */
2616 if (JUMP_P (control))
2617 add_cfi_insn = insn;
2619 for (i = 1; i < n; ++i)
2621 elt = pat->insn (i);
2622 scan_insn_after (elt);
2625 /* Make sure any register saves are visible at the jump target. */
2626 dwarf2out_flush_queued_reg_saves ();
2627 any_cfis_emitted = false;
2629 /* However, if there is some adjustment on the call itself, e.g.
2630 a call_pop, that action should be considered to happen after
2631 the call returns. */
2632 add_cfi_insn = insn;
2633 scan_insn_after (control);
2635 else
2637 /* Flush data before calls and jumps, and of course if necessary. */
2638 if (can_throw_internal (insn))
2640 notice_eh_throw (insn);
2641 dwarf2out_flush_queued_reg_saves ();
2643 else if (!NONJUMP_INSN_P (insn)
2644 || clobbers_queued_reg_save (insn)
2645 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2646 dwarf2out_flush_queued_reg_saves ();
2647 any_cfis_emitted = false;
2649 add_cfi_insn = insn;
2650 scan_insn_after (insn);
2651 control = insn;
2654 /* Between frame-related-p and args_size we might have otherwise
2655 emitted two cfa adjustments. Do it now. */
2656 def_cfa_1 (&this_cfa);
2658 /* Minimize the number of advances by emitting the entire queue
2659 once anything is emitted. */
2660 if (any_cfis_emitted
2661 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2662 dwarf2out_flush_queued_reg_saves ();
2664 /* Note that a test for control_flow_insn_p does exactly the
2665 same tests as are done to actually create the edges. So
2666 always call the routine and let it not create edges for
2667 non-control-flow insns. */
2668 create_trace_edges (control);
/* Clear the scan-global cursors so stale state cannot leak into the
   next trace.  */
2671 add_cfi_insn = NULL;
2672 cur_row = NULL;
2673 cur_trace = NULL;
2674 cur_cfa = NULL;
2677 /* Scan the function and create the initial set of CFI notes. */
2679 static void
2680 create_cfi_notes (void)
2682 dw_trace_info *ti;
2684 gcc_checking_assert (!queued_reg_saves.exists ());
2685 gcc_checking_assert (!trace_work_list.exists ());
2687 /* Always begin at the entry trace. */
2688 ti = &trace_info[0];
2689 scan_trace (ti, true);
2691 while (!trace_work_list.is_empty ())
2693 ti = trace_work_list.pop ();
2694 scan_trace (ti, false);
2697 queued_reg_saves.release ();
2698 trace_work_list.release ();
2701 /* Return the insn before the first NOTE_INSN_CFI after START. */
2703 static rtx_insn *
2704 before_next_cfi_note (rtx_insn *start)
2706 rtx_insn *prev = start;
2707 while (start)
2709 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2710 return prev;
2711 prev = start;
2712 start = NEXT_INSN (start);
2714 gcc_unreachable ();
2717 /* Insert CFI notes between traces to properly change state between them. */
2719 static void
2720 connect_traces (void)
2722 unsigned i, n = trace_info.length ();
2723 dw_trace_info *prev_ti, *ti;
2725 /* ??? Ideally, we should have both queued and processed every trace.
2726 However the current representation of constant pools on various targets
2727 is indistinguishable from unreachable code. Assume for the moment that
2728 we can simply skip over such traces. */
2729 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2730 these are not "real" instructions, and should not be considered.
2731 This could be generically useful for tablejump data as well. */
2732 /* Remove all unprocessed traces from the list. */
2733 for (i = n - 1; i > 0; --i)
2735 ti = &trace_info[i];
2736 if (ti->beg_row == NULL)
2738 trace_info.ordered_remove (i);
2739 n -= 1;
2741 else
2742 gcc_assert (ti->end_row != NULL);
2745 /* Work from the end back to the beginning. This lets us easily insert
2746 remember/restore_state notes in the correct order wrt other notes. */
2747 prev_ti = &trace_info[n - 1];
2748 for (i = n - 1; i > 0; --i)
2750 dw_cfi_row *old_row;
2752 ti = prev_ti;
2753 prev_ti = &trace_info[i - 1];
/* New CFIs for this boundary are inserted at the head of trace TI.  */
2755 add_cfi_insn = ti->head;
2757 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2758 for the portion of the function in the alternate text
2759 section. The row state at the very beginning of that
2760 new FDE will be exactly the row state from the CIE. */
2761 if (ti->switch_sections)
2762 old_row = cie_cfi_row;
2763 else
2765 old_row = prev_ti->end_row;
2766 /* If there's no change from the previous end state, fine. */
2767 if (cfi_row_equal_p (old_row, ti->beg_row))
2769 /* Otherwise check for the common case of sharing state with
2770 the beginning of an epilogue, but not the end. Insert
2771 remember/restore opcodes in that case. */
2772 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2774 dw_cfi_ref cfi;
2776 /* Note that if we blindly insert the remember at the
2777 start of the trace, we can wind up increasing the
2778 size of the unwind info due to extra advance opcodes.
2779 Instead, put the remember immediately before the next
2780 state change. We know there must be one, because the
2781 state at the beginning and head of the trace differ. */
2782 add_cfi_insn = before_next_cfi_note (prev_ti->head);
2783 cfi = new_cfi ();
2784 cfi->dw_cfi_opc = DW_CFA_remember_state;
2785 add_cfi (cfi);
2787 add_cfi_insn = ti->head;
2788 cfi = new_cfi ();
2789 cfi->dw_cfi_opc = DW_CFA_restore_state;
2790 add_cfi (cfi);
/* After restore_state the effective row is the remembered one;
   emit any residual difference from that.  */
2792 old_row = prev_ti->beg_row;
2794 /* Otherwise, we'll simply change state from the previous end. */
2797 change_cfi_row (old_row, ti->beg_row);
2799 if (dump_file && add_cfi_insn != ti->head)
2801 rtx_insn *note;
2803 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2804 prev_ti->id, ti->id);
2806 note = ti->head;
2809 note = NEXT_INSN (note);
2810 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2811 output_cfi_directive (dump_file, NOTE_CFI (note));
2813 while (note != add_cfi_insn);
2817 /* Connect args_size between traces that have can_throw_internal insns. */
2818 if (cfun->eh->lp_array)
2820 poly_int64 prev_args_size = 0;
2822 for (i = 0; i < n; ++i)
2824 ti = &trace_info[i];
/* The second section's FDE starts over from the CIE state.  */
2826 if (ti->switch_sections)
2827 prev_args_size = 0;
2828 if (ti->eh_head == NULL)
2829 continue;
2830 gcc_assert (!ti->args_size_undefined);
2832 if (maybe_ne (ti->beg_delay_args_size, prev_args_size))
2834 /* ??? Search back to previous CFI note. */
2835 add_cfi_insn = PREV_INSN (ti->eh_head);
2836 add_cfi_args_size (ti->beg_delay_args_size);
2839 prev_args_size = ti->end_delay_args_size;
2844 /* Set up the pseudo-cfg of instruction traces, as described at the
2845 block comment at the top of the file. */
2847 static void
2848 create_pseudo_cfg (void)
2850 bool saw_barrier, switch_sections;
2851 dw_trace_info ti;
2852 rtx_insn *insn;
2853 unsigned i;
2855 /* The first trace begins at the start of the function,
2856 and begins with the CIE row state. */
2857 trace_info.create (16);
2858 memset (&ti, 0, sizeof (ti));
2859 ti.head = get_insns ();
2860 ti.beg_row = cie_cfi_row;
2861 ti.cfa_store = cie_cfi_row->cfa;
2862 ti.cfa_temp.reg = INVALID_REGNUM;
2863 trace_info.quick_push (ti);
/* NOTE(review): TI was already copied into trace_info by the quick_push
   above, so this safe_push lands in the local copy only -- verify
   against upstream history whether the entry trace is meant to receive
   cie_return_save here.  */
2865 if (cie_return_save)
2866 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2868 /* Walk all the insns, collecting start of trace locations. */
2869 saw_barrier = false;
2870 switch_sections = false;
2871 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2873 if (BARRIER_P (insn))
2874 saw_barrier = true;
2875 else if (NOTE_P (insn)
2876 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2878 /* We should have just seen a barrier. */
2879 gcc_assert (saw_barrier);
2880 switch_sections = true;
2882 /* Watch out for save_point notes between basic blocks.
2883 In particular, a note after a barrier. Do not record these,
2884 delaying trace creation until the label. */
2885 else if (save_point_p (insn)
2886 && (LABEL_P (insn) || !saw_barrier))
2888 memset (&ti, 0, sizeof (ti));
2889 ti.head = insn;
2890 ti.switch_sections = switch_sections;
2891 ti.id = trace_info.length ();
2892 trace_info.safe_push (ti);
2894 saw_barrier = false;
2895 switch_sections = false;
2899 /* Create the trace index after we've finished building trace_info,
2900 avoiding stale pointer problems due to reallocation. */
2901 trace_index
2902 = new hash_table<trace_info_hasher> (trace_info.length ());
2903 dw_trace_info *tp;
2904 FOR_EACH_VEC_ELT (trace_info, i, tp)
2906 dw_trace_info **slot;
2908 if (dump_file)
2909 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2910 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2911 tp->switch_sections ? " (section switch)" : "");
/* Traces are hashed by the UID of their head insn.  */
2913 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2914 gcc_assert (*slot == NULL);
2915 *slot = tp;
2919 /* Record the initial position of the return address. RTL is
2920 INCOMING_RETURN_ADDR_RTX. */
2922 static void
2923 initial_return_save (rtx rtl)
2925 unsigned int reg = INVALID_REGNUM;
2926 poly_int64 offset = 0;
2928 switch (GET_CODE (rtl))
2930 case REG:
2931 /* RA is in a register. */
2932 reg = dwf_regno (rtl);
2933 break;
2935 case MEM:
2936 /* RA is on the stack. */
2937 rtl = XEXP (rtl, 0);
/* The address must be sp, sp+const or sp-const.  */
2938 switch (GET_CODE (rtl))
2940 case REG:
2941 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2942 offset = 0;
2943 break;
2945 case PLUS:
2946 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2947 offset = rtx_to_poly_int64 (XEXP (rtl, 1));
2948 break;
2950 case MINUS:
2951 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2952 offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
2953 break;
2955 default:
2956 gcc_unreachable ();
2959 break;
2961 case PLUS:
2962 /* The return address is at some offset from any value we can
2963 actually load. For instance, on the SPARC it is in %i7+8. Just
2964 ignore the offset for now; it doesn't matter for unwinding frames. */
2965 gcc_assert (CONST_INT_P (XEXP (rtl, 1)))
2966 initial_return_save (XEXP (rtl, 0));
2967 return;
2969 default:
2970 gcc_unreachable ();
/* If the RA lives somewhere other than its DWARF return column,
   record the mapping; REG is only valid in the register case.  */
2973 if (reg != DWARF_FRAME_RETURN_COLUMN)
2975 if (reg != INVALID_REGNUM)
2976 record_reg_saved_in_reg (rtl, pc_rtx);
2977 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2981 static void
2982 create_cie_data (void)
2984 dw_cfa_location loc;
2985 dw_trace_info cie_trace;
2987 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
/* Use a scratch trace so helpers that consult cur_trace work here.  */
2989 memset (&cie_trace, 0, sizeof (cie_trace));
2990 cur_trace = &cie_trace;
/* CFIs produced below accumulate into the CIE's vector and row.  */
2992 add_cfi_vec = &cie_cfi_vec;
2993 cie_cfi_row = cur_row = new_cfi_row ();
2995 /* On entry, the Canonical Frame Address is at SP. */
2996 memset (&loc, 0, sizeof (loc));
2997 loc.reg = dw_stack_pointer_regnum;
2998 /* create_cie_data is called just once per TU, and when using .cfi_startproc
2999 is even done by the assembler rather than the compiler. If the target
3000 has different incoming frame sp offsets depending on what kind of
3001 function it is, use a single constant offset for the target and
3002 if needed, adjust before the first instruction in insn stream. */
3003 loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
3004 def_cfa_1 (&loc);
3006 if (targetm.debug_unwind_info () == UI_DWARF2
3007 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3009 initial_return_save (INCOMING_RETURN_ADDR_RTX);
3011 /* For a few targets, we have the return address incoming into a
3012 register, but choose a different return column. This will result
3013 in a DW_CFA_register for the return, and an entry in
3014 regs_saved_in_regs to match. If the target later stores that
3015 return address register to the stack, we want to be able to emit
3016 the DW_CFA_offset against the return column, not the intermediate
3017 save register. Save the contents of regs_saved_in_regs so that
3018 we can re-initialize it at the start of each function. */
3019 switch (cie_trace.regs_saved_in_regs.length ())
3021 case 0:
3022 break;
3023 case 1:
3024 cie_return_save = ggc_alloc<reg_saved_in_data> ();
3025 *cie_return_save = cie_trace.regs_saved_in_regs[0];
3026 cie_trace.regs_saved_in_regs.release ();
3027 break;
3028 default:
3029 gcc_unreachable ();
/* Drop the scratch state so per-function processing starts clean.  */
3033 add_cfi_vec = NULL;
3034 cur_row = NULL;
3035 cur_trace = NULL;
3038 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3039 state at each location within the function. These notes will be
3040 emitted during pass_final. */
3042 static unsigned int
3043 execute_dwarf2_frame (void)
3045 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3046 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3048 /* The first time we're called, compute the incoming frame state. */
3049 if (cie_cfi_vec == NULL)
3050 create_cie_data ();
3052 dwarf2out_alloc_current_fde ();
3054 create_pseudo_cfg ();
3056 /* Do the work. */
3057 create_cfi_notes ();
3058 connect_traces ();
3059 add_cfis_to_fde ();
3061 /* Free all the data we allocated. */
3063 size_t i;
3064 dw_trace_info *ti;
3066 FOR_EACH_VEC_ELT (trace_info, i, ti)
3067 ti->regs_saved_in_regs.release ();
3069 trace_info.release ();
3071 delete trace_index;
3072 trace_index = NULL;
3074 return 0;
/* Convert a DWARF call frame info operation CFI_OPC to its string
   name, or a placeholder when the opcode is unknown.  */

static const char *
dwarf_cfi_name (unsigned int cfi_opc)
{
  const char *name = get_DW_CFA_name (cfi_opc);
  return name ? name : "DW_CFA_<unknown>";
}
3090 /* This routine will generate the correct assembly data for a location
3091 description based on a cfi entry with a complex address. */
3093 static void
3094 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3096 dw_loc_descr_ref loc;
3097 unsigned long size;
3099 if (cfi->dw_cfi_opc == DW_CFA_expression
3100 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3102 unsigned r =
3103 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3104 dw2_asm_output_data (1, r, NULL);
3105 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3107 else
3108 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3110 /* Output the size of the block. */
3111 size = size_of_locs (loc);
3112 dw2_asm_output_data_uleb128 (size, NULL);
3114 /* Now output the operations themselves. */
3115 output_loc_sequence (loc, for_eh);
3118 /* Similar, but used for .cfi_escape. */
3120 static void
3121 output_cfa_loc_raw (dw_cfi_ref cfi)
3123 dw_loc_descr_ref loc;
3124 unsigned long size;
3126 if (cfi->dw_cfi_opc == DW_CFA_expression
3127 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3129 unsigned r =
3130 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3131 fprintf (asm_out_file, "%#x,", r);
3132 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3134 else
3135 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3137 /* Output the size of the block. */
3138 size = size_of_locs (loc);
3139 dw2_asm_output_data_uleb128_raw (size);
3140 fputc (',', asm_out_file);
3142 /* Now output the operations themselves. */
3143 output_loc_sequence_raw (loc);
3146 /* Output a Call Frame Information opcode and its operand(s). */
3148 void
3149 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3151 unsigned long r;
3152 HOST_WIDE_INT off;
/* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore pack their
   first operand into the low 6 bits of the opcode byte; everything
   else emits the opcode byte followed by separate operands.  */
3154 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3155 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3156 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3157 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3158 ((unsigned HOST_WIDE_INT)
3159 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3160 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3162 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3163 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3164 "DW_CFA_offset, column %#lx", r);
3165 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3166 dw2_asm_output_data_uleb128 (off, NULL);
3168 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3170 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3171 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3172 "DW_CFA_restore, column %#lx", r);
3174 else
3176 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3177 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3179 switch (cfi->dw_cfi_opc)
3181 case DW_CFA_set_loc:
3182 if (for_eh)
3183 dw2_asm_output_encoded_addr_rtx (
3184 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3185 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3186 false, NULL);
3187 else
3188 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3189 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3190 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3191 break;
/* The advance_locN forms emit an N-byte delta from the previous
   location label, then remember the new label.  */
3193 case DW_CFA_advance_loc1:
3194 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3195 fde->dw_fde_current_label, NULL);
3196 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3197 break;
3199 case DW_CFA_advance_loc2:
3200 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3201 fde->dw_fde_current_label, NULL);
3202 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3203 break;
3205 case DW_CFA_advance_loc4:
3206 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3207 fde->dw_fde_current_label, NULL);
3208 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3209 break;
3211 case DW_CFA_MIPS_advance_loc8:
3212 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3213 fde->dw_fde_current_label, NULL);
3214 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3215 break;
3217 case DW_CFA_offset_extended:
3218 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3219 dw2_asm_output_data_uleb128 (r, NULL);
3220 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3221 dw2_asm_output_data_uleb128 (off, NULL);
3222 break;
3224 case DW_CFA_def_cfa:
3225 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3226 dw2_asm_output_data_uleb128 (r, NULL);
3227 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3228 break;
/* The _sf forms take a factored signed offset (data alignment).  */
3230 case DW_CFA_offset_extended_sf:
3231 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3232 dw2_asm_output_data_uleb128 (r, NULL);
3233 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3234 dw2_asm_output_data_sleb128 (off, NULL);
3235 break;
3237 case DW_CFA_def_cfa_sf:
3238 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3239 dw2_asm_output_data_uleb128 (r, NULL);
3240 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3241 dw2_asm_output_data_sleb128 (off, NULL);
3242 break;
3244 case DW_CFA_restore_extended:
3245 case DW_CFA_undefined:
3246 case DW_CFA_same_value:
3247 case DW_CFA_def_cfa_register:
3248 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3249 dw2_asm_output_data_uleb128 (r, NULL);
3250 break;
3252 case DW_CFA_register:
3253 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3254 dw2_asm_output_data_uleb128 (r, NULL);
3255 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3256 dw2_asm_output_data_uleb128 (r, NULL);
3257 break;
3259 case DW_CFA_def_cfa_offset:
3260 case DW_CFA_GNU_args_size:
3261 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3262 break;
3264 case DW_CFA_def_cfa_offset_sf:
3265 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3266 dw2_asm_output_data_sleb128 (off, NULL);
3267 break;
/* No operands.  */
3269 case DW_CFA_GNU_window_save:
3270 break;
3272 case DW_CFA_def_cfa_expression:
3273 case DW_CFA_expression:
3274 case DW_CFA_val_expression:
3275 output_cfa_loc (cfi, for_eh);
3276 break;
3278 case DW_CFA_GNU_negative_offset_extended:
3279 /* Obsoleted by DW_CFA_offset_extended_sf. */
3280 gcc_unreachable ();
3282 default:
3283 break;
3288 /* Similar, but do it via assembler directives instead. */
3290 void
3291 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3293 unsigned long r, r2;
3295 switch (cfi->dw_cfi_opc)
3297 case DW_CFA_advance_loc:
3298 case DW_CFA_advance_loc1:
3299 case DW_CFA_advance_loc2:
3300 case DW_CFA_advance_loc4:
3301 case DW_CFA_MIPS_advance_loc8:
3302 case DW_CFA_set_loc:
3303 /* Should only be created in a code path not followed when emitting
3304 via directives. The assembler is going to take care of this for
3305 us. But this routines is also used for debugging dumps, so
3306 print something. */
3307 gcc_assert (f != asm_out_file);
3308 fprintf (f, "\t.cfi_advance_loc\n");
3309 break;
3311 case DW_CFA_offset:
3312 case DW_CFA_offset_extended:
3313 case DW_CFA_offset_extended_sf:
3314 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3315 fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3316 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3317 break;
3319 case DW_CFA_restore:
3320 case DW_CFA_restore_extended:
3321 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3322 fprintf (f, "\t.cfi_restore %lu\n", r);
3323 break;
3325 case DW_CFA_undefined:
3326 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3327 fprintf (f, "\t.cfi_undefined %lu\n", r);
3328 break;
3330 case DW_CFA_same_value:
3331 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3332 fprintf (f, "\t.cfi_same_value %lu\n", r);
3333 break;
3335 case DW_CFA_def_cfa:
3336 case DW_CFA_def_cfa_sf:
3337 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3338 fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
3339 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3340 break;
3342 case DW_CFA_def_cfa_register:
3343 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3344 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3345 break;
3347 case DW_CFA_register:
3348 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3349 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3350 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3351 break;
3353 case DW_CFA_def_cfa_offset:
3354 case DW_CFA_def_cfa_offset_sf:
3355 fprintf (f, "\t.cfi_def_cfa_offset "
3356 HOST_WIDE_INT_PRINT_DEC"\n",
3357 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3358 break;
3360 case DW_CFA_remember_state:
3361 fprintf (f, "\t.cfi_remember_state\n");
3362 break;
3363 case DW_CFA_restore_state:
3364 fprintf (f, "\t.cfi_restore_state\n");
3365 break;
/* There is no .cfi directive for GNU_args_size; when writing real
   assembly, escape the raw opcode bytes instead.  */
3367 case DW_CFA_GNU_args_size:
3368 if (f == asm_out_file)
3370 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3371 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3372 if (flag_debug_asm)
3373 fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
3374 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3375 fputc ('\n', f);
3377 else
3379 fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
3380 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3382 break;
3384 case DW_CFA_GNU_window_save:
3385 fprintf (f, "\t.cfi_window_save\n");
3386 break;
3388 case DW_CFA_def_cfa_expression:
3389 case DW_CFA_expression:
3390 case DW_CFA_val_expression:
/* For dumps, print a placeholder; for real assembly, escape the
   opcode followed by the raw location expression.  */
3391 if (f != asm_out_file)
3393 fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
3394 cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
3395 cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
3396 break;
3398 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3399 output_cfa_loc_raw (cfi);
3400 fputc ('\n', f);
3401 break;
3403 default:
3404 gcc_unreachable ();
3408 void
3409 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3411 if (dwarf2out_do_cfi_asm ())
3412 output_cfi_directive (asm_out_file, cfi);
3415 static void
3416 dump_cfi_row (FILE *f, dw_cfi_row *row)
3418 dw_cfi_ref cfi;
3419 unsigned i;
3421 cfi = row->cfa_cfi;
3422 if (!cfi)
3424 dw_cfa_location dummy;
3425 memset (&dummy, 0, sizeof (dummy));
3426 dummy.reg = INVALID_REGNUM;
3427 cfi = def_cfa_0 (&dummy, &row->cfa);
3429 output_cfi_directive (f, cfi);
3431 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3432 if (cfi)
3433 output_cfi_directive (f, cfi);
3436 void debug_cfi_row (dw_cfi_row *row);
3438 void
3439 debug_cfi_row (dw_cfi_row *row)
3441 dump_cfi_row (stderr, row);
/* Cached result of dwarf2out_do_cfi_asm, preserved across PCH
   (dwarf2out_do_frame also consults it as a shortcut).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3449 /* Decide whether to emit EH frame unwind information for the current
3450 translation unit. */
3452 bool
3453 dwarf2out_do_eh_frame (void)
3455 return
3456 (flag_unwind_tables || flag_exceptions)
3457 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3460 /* Decide whether we want to emit frame unwind information for the current
3461 translation unit. */
3463 bool
3464 dwarf2out_do_frame (void)
3466 /* We want to emit correct CFA location expressions or lists, so we
3467 have to return true if we're going to output debug info, even if
3468 we're not going to output frame or unwind info. */
3469 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3470 return true;
3472 if (saved_do_cfi_asm > 0)
3473 return true;
3475 if (targetm.debug_unwind_info () == UI_DWARF2)
3476 return true;
3478 if (dwarf2out_do_eh_frame ())
3479 return true;
3481 return false;
3484 /* Decide whether to emit frame unwind via assembler directives. */
3486 bool
3487 dwarf2out_do_cfi_asm (void)
3489 int enc;
3491 if (saved_do_cfi_asm != 0)
3492 return saved_do_cfi_asm > 0;
3494 /* Assume failure for a moment. */
3495 saved_do_cfi_asm = -1;
3497 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3498 return false;
3499 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3500 return false;
3502 /* Make sure the personality encoding is one the assembler can support.
3503 In particular, aligned addresses can't be handled. */
3504 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3505 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3506 return false;
3507 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3508 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3509 return false;
3511 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3512 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3513 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
3514 return false;
3516 /* Success! */
3517 saved_do_cfi_asm = 1;
3518 return true;
3521 namespace {
3523 const pass_data pass_data_dwarf2_frame =
3525 RTL_PASS, /* type */
3526 "dwarf2", /* name */
3527 OPTGROUP_NONE, /* optinfo_flags */
3528 TV_FINAL, /* tv_id */
3529 0, /* properties_required */
3530 0, /* properties_provided */
3531 0, /* properties_destroyed */
3532 0, /* todo_flags_start */
3533 0, /* todo_flags_finish */
3536 class pass_dwarf2_frame : public rtl_opt_pass
3538 public:
3539 pass_dwarf2_frame (gcc::context *ctxt)
3540 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3543 /* opt_pass methods: */
3544 virtual bool gate (function *);
3545 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3547 }; // class pass_dwarf2_frame
3549 bool
3550 pass_dwarf2_frame::gate (function *)
3552 /* Targets which still implement the prologue in assembler text
3553 cannot use the generic dwarf2 unwinding. */
3554 if (!targetm.have_prologue ())
3555 return false;
3557 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3558 from the optimized shrink-wrapping annotations that we will compute.
3559 For now, only produce the CFI notes for dwarf2. */
3560 return dwarf2out_do_frame ();
3563 } // anon namespace
3565 rtl_opt_pass *
3566 make_pass_dwarf2_frame (gcc::context *ctxt)
3568 return new pass_dwarf2_frame (ctxt);
3571 #include "gt-dwarf2cfi.h"