Fix warnings building linux-atomic.c and fptr.c on hppa64-linux
[official-gcc.git] / gcc / dwarf2cfi.c
blobdf9b625f5bcc255404066dada43f8606776553db
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
42 #include "flags.h" /* dwarf_debuginfo_p */
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

/* A target without an incoming return address register must never
   reach code that expands this; trap hard if it does.  */
#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

#ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
#define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
#endif
60 /* A collected description of an entire row of the abstract CFI table. */
61 struct GTY(()) dw_cfi_row
63 /* The expression that computes the CFA, expressed in two different ways.
64 The CFA member for the simple cases, and the full CFI expression for
65 the complex cases. The later will be a DW_CFA_cfa_expression. */
66 dw_cfa_location cfa;
67 dw_cfi_ref cfa_cfi;
69 /* The expressions for any register column that is saved. */
70 cfi_vec reg_save;
72 /* True if the register window is saved. */
73 bool window_save;
75 /* True if the return address is in a mangled state. */
76 bool ra_mangled;
79 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
80 struct GTY(()) reg_saved_in_data {
81 rtx orig_reg;
82 rtx saved_in_reg;
86 /* Since we no longer have a proper CFG, we're going to create a facsimile
87 of one on the fly while processing the frame-related insns.
89 We create dw_trace_info structures for each extended basic block beginning
90 and ending at a "save point". Save points are labels, barriers, certain
91 notes, and of course the beginning and end of the function.
93 As we encounter control transfer insns, we propagate the "current"
94 row state across the edges to the starts of traces. When checking is
95 enabled, we validate that we propagate the same data from all sources.
97 All traces are members of the TRACE_INFO array, in the order in which
98 they appear in the instruction stream.
100 All save points are present in the TRACE_INDEX hash, mapping the insn
101 starting a trace to the dw_trace_info describing the trace. */
103 struct dw_trace_info
105 /* The insn that begins the trace. */
106 rtx_insn *head;
108 /* The row state at the beginning and end of the trace. */
109 dw_cfi_row *beg_row, *end_row;
111 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
112 while scanning insns. However, the args_size value is irrelevant at
113 any point except can_throw_internal_p insns. Therefore the "delay"
114 sizes the values that must actually be emitted for this trace. */
115 poly_int64_pod beg_true_args_size, end_true_args_size;
116 poly_int64_pod beg_delay_args_size, end_delay_args_size;
118 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
119 rtx_insn *eh_head;
121 /* The following variables contain data used in interpreting frame related
122 expressions. These are not part of the "real" row state as defined by
123 Dwarf, but it seems like they need to be propagated into a trace in case
124 frame related expressions have been sunk. */
125 /* ??? This seems fragile. These variables are fragments of a larger
126 expression. If we do not keep the entire expression together, we risk
127 not being able to put it together properly. Consider forcing targets
128 to generate self-contained expressions and dropping all of the magic
129 interpretation code in this file. Or at least refusing to shrink wrap
130 any frame related insn that doesn't contain a complete expression. */
132 /* The register used for saving registers to the stack, and its offset
133 from the CFA. */
134 dw_cfa_location cfa_store;
136 /* A temporary register holding an integral value used in adjusting SP
137 or setting up the store_reg. The "offset" field holds the integer
138 value, not an offset. */
139 dw_cfa_location cfa_temp;
141 /* A set of registers saved in other registers. This is the inverse of
142 the row->reg_save info, if the entry is a DW_CFA_register. This is
143 implemented as a flat array because it normally contains zero or 1
144 entry, depending on the target. IA-64 is the big spender here, using
145 a maximum of 5 entries. */
146 vec<reg_saved_in_data> regs_saved_in_regs;
148 /* An identifier for this trace. Used only for debugging dumps. */
149 unsigned id;
151 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
152 bool switch_sections;
154 /* True if we've seen different values incoming to beg_true_args_size. */
155 bool args_size_undefined;
157 /* True if we've seen an insn with a REG_ARGS_SIZE note before EH_HEAD. */
158 bool args_size_defined_for_eh;
162 /* Hashtable helpers. */
164 struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
166 static inline hashval_t hash (const dw_trace_info *);
167 static inline bool equal (const dw_trace_info *, const dw_trace_info *);
170 inline hashval_t
171 trace_info_hasher::hash (const dw_trace_info *ti)
173 return INSN_UID (ti->head);
176 inline bool
177 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
179 return a->head == b->head;
183 /* The variables making up the pseudo-cfg, as described above. */
184 static vec<dw_trace_info> trace_info;
185 static vec<dw_trace_info *> trace_work_list;
186 static hash_table<trace_info_hasher> *trace_index;
188 /* A vector of call frame insns for the CIE. */
189 cfi_vec cie_cfi_vec;
191 /* The state of the first row of the FDE table, which includes the
192 state provided by the CIE. */
193 static GTY(()) dw_cfi_row *cie_cfi_row;
195 static GTY(()) reg_saved_in_data *cie_return_save;
197 static GTY(()) unsigned long dwarf2out_cfi_label_num;
199 /* The insn after which a new CFI note should be emitted. */
200 static rtx_insn *add_cfi_insn;
202 /* When non-null, add_cfi will add the CFI to this vector. */
203 static cfi_vec *add_cfi_vec;
205 /* The current instruction trace. */
206 static dw_trace_info *cur_trace;
208 /* The current, i.e. most recently generated, row of the CFI table. */
209 static dw_cfi_row *cur_row;
211 /* A copy of the current CFA, for use during the processing of a
212 single insn. */
213 static dw_cfa_location *cur_cfa;
215 /* We delay emitting a register save until either (a) we reach the end
216 of the prologue or (b) the register is clobbered. This clusters
217 register saves so that there are fewer pc advances. */
219 struct queued_reg_save {
220 rtx reg;
221 rtx saved_reg;
222 poly_int64_pod cfa_offset;
226 static vec<queued_reg_save> queued_reg_saves;
228 /* True if any CFI directives were emitted at the current insn. */
229 static bool any_cfis_emitted;
231 /* Short-hand for commonly used register numbers. */
232 static unsigned dw_stack_pointer_regnum;
233 static unsigned dw_frame_pointer_regnum;
235 /* Hook used by __throw. */
238 expand_builtin_dwarf_sp_column (void)
240 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
241 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
244 /* MEM is a memory reference for the register size table, each element of
245 which has mode MODE. Initialize column C as a return address column. */
247 static void
248 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
250 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
251 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
252 emit_move_insn (adjust_address (mem, mode, offset),
253 gen_int_mode (size, mode));
256 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
257 init_one_dwarf_reg_size to communicate on what has been done by the
258 latter. */
260 struct init_one_dwarf_reg_state
262 /* Whether the dwarf return column was initialized. */
263 bool wrote_return_column;
265 /* For each hard register REGNO, whether init_one_dwarf_reg_size
266 was given REGNO to process already. */
267 bool processed_regno [FIRST_PSEUDO_REGISTER];
271 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
272 initialize the dwarf register size table entry corresponding to register
273 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
274 use for the size entry to initialize, and INIT_STATE is the communication
275 datastructure conveying what we're doing to our caller. */
277 static
278 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
279 rtx table, machine_mode slotmode,
280 init_one_dwarf_reg_state *init_state)
282 const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
283 const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
284 const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
286 poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
287 poly_int64 regsize = GET_MODE_SIZE (regmode);
289 init_state->processed_regno[regno] = true;
291 if (rnum >= DWARF_FRAME_REGISTERS)
292 return;
294 if (dnum == DWARF_FRAME_RETURN_COLUMN)
296 if (regmode == VOIDmode)
297 return;
298 init_state->wrote_return_column = true;
301 /* ??? When is this true? Should it be a test based on DCOL instead? */
302 if (maybe_lt (slotoffset, 0))
303 return;
305 emit_move_insn (adjust_address (table, slotmode, slotoffset),
306 gen_int_mode (regsize, slotmode));
309 /* Generate code to initialize the dwarf register size table located
310 at the provided ADDRESS. */
312 void
313 expand_builtin_init_dwarf_reg_sizes (tree address)
315 unsigned int i;
316 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
317 rtx addr = expand_normal (address);
318 rtx mem = gen_rtx_MEM (BLKmode, addr);
320 init_one_dwarf_reg_state init_state;
322 memset ((char *)&init_state, 0, sizeof (init_state));
324 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
326 machine_mode save_mode;
327 rtx span;
329 /* No point in processing a register multiple times. This could happen
330 with register spans, e.g. when a reg is first processed as a piece of
331 a span, then as a register on its own later on. */
333 if (init_state.processed_regno[i])
334 continue;
336 save_mode = targetm.dwarf_frame_reg_mode (i);
337 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
339 if (!span)
340 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
341 else
343 for (int si = 0; si < XVECLEN (span, 0); si++)
345 rtx reg = XVECEXP (span, 0, si);
347 init_one_dwarf_reg_size
348 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
353 if (!init_state.wrote_return_column)
354 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
356 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
357 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
358 #endif
360 targetm.init_dwarf_reg_sizes_extra (address);
364 static dw_trace_info *
365 get_trace_info (rtx_insn *insn)
367 dw_trace_info dummy;
368 dummy.head = insn;
369 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
372 static bool
373 save_point_p (rtx_insn *insn)
375 /* Labels, except those that are really jump tables. */
376 if (LABEL_P (insn))
377 return inside_basic_block_p (insn);
379 /* We split traces at the prologue/epilogue notes because those
380 are points at which the unwind info is usually stable. This
381 makes it easier to find spots with identical unwind info so
382 that we can use remember/restore_state opcodes. */
383 if (NOTE_P (insn))
384 switch (NOTE_KIND (insn))
386 case NOTE_INSN_PROLOGUE_END:
387 case NOTE_INSN_EPILOGUE_BEG:
388 return true;
391 return false;
394 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
396 static inline HOST_WIDE_INT
397 div_data_align (HOST_WIDE_INT off)
399 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
400 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
401 return r;
404 /* Return true if we need a signed version of a given opcode
405 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
407 static inline bool
408 need_data_align_sf_opcode (HOST_WIDE_INT off)
410 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
413 /* Return a pointer to a newly allocated Call Frame Instruction. */
415 static inline dw_cfi_ref
416 new_cfi (void)
418 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
420 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
421 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
423 return cfi;
426 /* Return a newly allocated CFI row, with no defined data. */
428 static dw_cfi_row *
429 new_cfi_row (void)
431 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
433 row->cfa.reg = INVALID_REGNUM;
435 return row;
438 /* Return a copy of an existing CFI row. */
440 static dw_cfi_row *
441 copy_cfi_row (dw_cfi_row *src)
443 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
445 *dst = *src;
446 dst->reg_save = vec_safe_copy (src->reg_save);
448 return dst;
451 /* Return a copy of an existing CFA location. */
453 static dw_cfa_location *
454 copy_cfa (dw_cfa_location *src)
456 dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
457 *dst = *src;
458 return dst;
461 /* Generate a new label for the CFI info to refer to. */
463 static char *
464 dwarf2out_cfi_label (void)
466 int num = dwarf2out_cfi_label_num++;
467 char label[20];
469 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
471 return xstrdup (label);
474 /* Add CFI either to the current insn stream or to a vector, or both. */
476 static void
477 add_cfi (dw_cfi_ref cfi)
479 any_cfis_emitted = true;
481 if (add_cfi_insn != NULL)
483 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
484 NOTE_CFI (add_cfi_insn) = cfi;
487 if (add_cfi_vec != NULL)
488 vec_safe_push (*add_cfi_vec, cfi);
491 static void
492 add_cfi_args_size (poly_int64 size)
494 /* We don't yet have a representation for polynomial sizes. */
495 HOST_WIDE_INT const_size = size.to_constant ();
497 dw_cfi_ref cfi = new_cfi ();
499 /* While we can occasionally have args_size < 0 internally, this state
500 should not persist at a point we actually need an opcode. */
501 gcc_assert (const_size >= 0);
503 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
504 cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;
506 add_cfi (cfi);
509 static void
510 add_cfi_restore (unsigned reg)
512 dw_cfi_ref cfi = new_cfi ();
514 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
515 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
517 add_cfi (cfi);
520 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
521 that the register column is no longer saved. */
523 static void
524 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
526 if (vec_safe_length (row->reg_save) <= column)
527 vec_safe_grow_cleared (row->reg_save, column + 1, true);
528 (*row->reg_save)[column] = cfi;
531 /* This function fills in aa dw_cfa_location structure from a dwarf location
532 descriptor sequence. */
534 static void
535 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
537 struct dw_loc_descr_node *ptr;
538 cfa->offset = 0;
539 cfa->base_offset = 0;
540 cfa->indirect = 0;
541 cfa->reg = -1;
543 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
545 enum dwarf_location_atom op = ptr->dw_loc_opc;
547 switch (op)
549 case DW_OP_reg0:
550 case DW_OP_reg1:
551 case DW_OP_reg2:
552 case DW_OP_reg3:
553 case DW_OP_reg4:
554 case DW_OP_reg5:
555 case DW_OP_reg6:
556 case DW_OP_reg7:
557 case DW_OP_reg8:
558 case DW_OP_reg9:
559 case DW_OP_reg10:
560 case DW_OP_reg11:
561 case DW_OP_reg12:
562 case DW_OP_reg13:
563 case DW_OP_reg14:
564 case DW_OP_reg15:
565 case DW_OP_reg16:
566 case DW_OP_reg17:
567 case DW_OP_reg18:
568 case DW_OP_reg19:
569 case DW_OP_reg20:
570 case DW_OP_reg21:
571 case DW_OP_reg22:
572 case DW_OP_reg23:
573 case DW_OP_reg24:
574 case DW_OP_reg25:
575 case DW_OP_reg26:
576 case DW_OP_reg27:
577 case DW_OP_reg28:
578 case DW_OP_reg29:
579 case DW_OP_reg30:
580 case DW_OP_reg31:
581 cfa->reg = op - DW_OP_reg0;
582 break;
583 case DW_OP_regx:
584 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
585 break;
586 case DW_OP_breg0:
587 case DW_OP_breg1:
588 case DW_OP_breg2:
589 case DW_OP_breg3:
590 case DW_OP_breg4:
591 case DW_OP_breg5:
592 case DW_OP_breg6:
593 case DW_OP_breg7:
594 case DW_OP_breg8:
595 case DW_OP_breg9:
596 case DW_OP_breg10:
597 case DW_OP_breg11:
598 case DW_OP_breg12:
599 case DW_OP_breg13:
600 case DW_OP_breg14:
601 case DW_OP_breg15:
602 case DW_OP_breg16:
603 case DW_OP_breg17:
604 case DW_OP_breg18:
605 case DW_OP_breg19:
606 case DW_OP_breg20:
607 case DW_OP_breg21:
608 case DW_OP_breg22:
609 case DW_OP_breg23:
610 case DW_OP_breg24:
611 case DW_OP_breg25:
612 case DW_OP_breg26:
613 case DW_OP_breg27:
614 case DW_OP_breg28:
615 case DW_OP_breg29:
616 case DW_OP_breg30:
617 case DW_OP_breg31:
618 cfa->reg = op - DW_OP_breg0;
619 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
620 break;
621 case DW_OP_bregx:
622 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
623 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
624 break;
625 case DW_OP_deref:
626 cfa->indirect = 1;
627 break;
628 case DW_OP_plus_uconst:
629 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
630 break;
631 default:
632 gcc_unreachable ();
637 /* Find the previous value for the CFA, iteratively. CFI is the opcode
638 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
639 one level of remember/restore state processing. */
641 void
642 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
644 switch (cfi->dw_cfi_opc)
646 case DW_CFA_def_cfa_offset:
647 case DW_CFA_def_cfa_offset_sf:
648 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
649 break;
650 case DW_CFA_def_cfa_register:
651 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
652 break;
653 case DW_CFA_def_cfa:
654 case DW_CFA_def_cfa_sf:
655 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
656 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
657 break;
658 case DW_CFA_def_cfa_expression:
659 if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
660 *loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
661 else
662 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
663 break;
665 case DW_CFA_remember_state:
666 gcc_assert (!remember->in_use);
667 *remember = *loc;
668 remember->in_use = 1;
669 break;
670 case DW_CFA_restore_state:
671 gcc_assert (remember->in_use);
672 *loc = *remember;
673 remember->in_use = 0;
674 break;
676 default:
677 break;
681 /* Determine if two dw_cfa_location structures define the same data. */
683 bool
684 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
686 return (loc1->reg == loc2->reg
687 && known_eq (loc1->offset, loc2->offset)
688 && loc1->indirect == loc2->indirect
689 && (loc1->indirect == 0
690 || known_eq (loc1->base_offset, loc2->base_offset)));
693 /* Determine if two CFI operands are identical. */
695 static bool
696 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
698 switch (t)
700 case dw_cfi_oprnd_unused:
701 return true;
702 case dw_cfi_oprnd_reg_num:
703 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
704 case dw_cfi_oprnd_offset:
705 return a->dw_cfi_offset == b->dw_cfi_offset;
706 case dw_cfi_oprnd_addr:
707 return (a->dw_cfi_addr == b->dw_cfi_addr
708 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
709 case dw_cfi_oprnd_loc:
710 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
711 case dw_cfi_oprnd_cfa_loc:
712 return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
714 gcc_unreachable ();
717 /* Determine if two CFI entries are identical. */
719 static bool
720 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
722 enum dwarf_call_frame_info opc;
724 /* Make things easier for our callers, including missing operands. */
725 if (a == b)
726 return true;
727 if (a == NULL || b == NULL)
728 return false;
730 /* Obviously, the opcodes must match. */
731 opc = a->dw_cfi_opc;
732 if (opc != b->dw_cfi_opc)
733 return false;
735 /* Compare the two operands, re-using the type of the operands as
736 already exposed elsewhere. */
737 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
738 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
739 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
740 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
743 /* Determine if two CFI_ROW structures are identical. */
745 static bool
746 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
748 size_t i, n_a, n_b, n_max;
750 if (a->cfa_cfi)
752 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
753 return false;
755 else if (!cfa_equal_p (&a->cfa, &b->cfa))
756 return false;
758 n_a = vec_safe_length (a->reg_save);
759 n_b = vec_safe_length (b->reg_save);
760 n_max = MAX (n_a, n_b);
762 for (i = 0; i < n_max; ++i)
764 dw_cfi_ref r_a = NULL, r_b = NULL;
766 if (i < n_a)
767 r_a = (*a->reg_save)[i];
768 if (i < n_b)
769 r_b = (*b->reg_save)[i];
771 if (!cfi_equal_p (r_a, r_b))
772 return false;
775 if (a->window_save != b->window_save)
776 return false;
778 if (a->ra_mangled != b->ra_mangled)
779 return false;
781 return true;
784 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
785 what opcode to emit. Returns the CFI opcode to effect the change, or
786 NULL if NEW_CFA == OLD_CFA. */
788 static dw_cfi_ref
789 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
791 dw_cfi_ref cfi;
793 /* If nothing changed, no need to issue any call frame instructions. */
794 if (cfa_equal_p (old_cfa, new_cfa))
795 return NULL;
797 cfi = new_cfi ();
799 HOST_WIDE_INT const_offset;
800 if (new_cfa->reg == old_cfa->reg
801 && !new_cfa->indirect
802 && !old_cfa->indirect
803 && new_cfa->offset.is_constant (&const_offset))
805 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
806 the CFA register did not change but the offset did. The data
807 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
808 in the assembler via the .cfi_def_cfa_offset directive. */
809 if (const_offset < 0)
810 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
811 else
812 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
813 cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
815 else if (new_cfa->offset.is_constant ()
816 && known_eq (new_cfa->offset, old_cfa->offset)
817 && old_cfa->reg != INVALID_REGNUM
818 && !new_cfa->indirect
819 && !old_cfa->indirect)
821 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
822 indicating the CFA register has changed to <register> but the
823 offset has not changed. This requires the old CFA to have
824 been set as a register plus offset rather than a general
825 DW_CFA_def_cfa_expression. */
826 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
827 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
829 else if (new_cfa->indirect == 0
830 && new_cfa->offset.is_constant (&const_offset))
832 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
833 indicating the CFA register has changed to <register> with
834 the specified offset. The data factoring for DW_CFA_def_cfa_sf
835 happens in output_cfi, or in the assembler via the .cfi_def_cfa
836 directive. */
837 if (const_offset < 0)
838 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
839 else
840 cfi->dw_cfi_opc = DW_CFA_def_cfa;
841 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
842 cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
844 else
846 /* Construct a DW_CFA_def_cfa_expression instruction to
847 calculate the CFA using a full location expression since no
848 register-offset pair is available. */
849 struct dw_loc_descr_node *loc_list;
851 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
852 loc_list = build_cfa_loc (new_cfa, 0);
853 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
854 if (!new_cfa->offset.is_constant ()
855 || !new_cfa->base_offset.is_constant ())
856 /* It's hard to reconstruct the CFA location for a polynomial
857 expression, so just cache it instead. */
858 cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
859 else
860 cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
863 return cfi;
866 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
868 static void
869 def_cfa_1 (dw_cfa_location *new_cfa)
871 dw_cfi_ref cfi;
873 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
874 cur_trace->cfa_store.offset = new_cfa->offset;
876 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
877 if (cfi)
879 cur_row->cfa = *new_cfa;
880 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
881 ? cfi : NULL);
883 add_cfi (cfi);
887 /* Add the CFI for saving a register. REG is the CFA column number.
888 If SREG is -1, the register is saved at OFFSET from the CFA;
889 otherwise it is saved in SREG. */
891 static void
892 reg_save (unsigned int reg, unsigned int sreg, poly_int64 offset)
894 dw_fde_ref fde = cfun ? cfun->fde : NULL;
895 dw_cfi_ref cfi = new_cfi ();
897 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
899 if (sreg == INVALID_REGNUM)
901 HOST_WIDE_INT const_offset;
902 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
903 if (fde && fde->stack_realign)
905 cfi->dw_cfi_opc = DW_CFA_expression;
906 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
907 cfi->dw_cfi_oprnd2.dw_cfi_loc
908 = build_cfa_aligned_loc (&cur_row->cfa, offset,
909 fde->stack_realignment);
911 else if (offset.is_constant (&const_offset))
913 if (need_data_align_sf_opcode (const_offset))
914 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
915 else if (reg & ~0x3f)
916 cfi->dw_cfi_opc = DW_CFA_offset_extended;
917 else
918 cfi->dw_cfi_opc = DW_CFA_offset;
919 cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
921 else
923 cfi->dw_cfi_opc = DW_CFA_expression;
924 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
925 cfi->dw_cfi_oprnd2.dw_cfi_loc
926 = build_cfa_loc (&cur_row->cfa, offset);
929 else if (sreg == reg)
931 /* While we could emit something like DW_CFA_same_value or
932 DW_CFA_restore, we never expect to see something like that
933 in a prologue. This is more likely to be a bug. A backend
934 can always bypass this by using REG_CFA_RESTORE directly. */
935 gcc_unreachable ();
937 else
939 cfi->dw_cfi_opc = DW_CFA_register;
940 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
943 add_cfi (cfi);
944 update_row_reg_save (cur_row, reg, cfi);
947 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
948 and adjust data structures to match. */
950 static void
951 notice_args_size (rtx_insn *insn)
953 poly_int64 args_size, delta;
954 rtx note;
956 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
957 if (note == NULL)
958 return;
960 if (!cur_trace->eh_head)
961 cur_trace->args_size_defined_for_eh = true;
963 args_size = get_args_size (note);
964 delta = args_size - cur_trace->end_true_args_size;
965 if (known_eq (delta, 0))
966 return;
968 cur_trace->end_true_args_size = args_size;
970 /* If the CFA is computed off the stack pointer, then we must adjust
971 the computation of the CFA as well. */
972 if (cur_cfa->reg == dw_stack_pointer_regnum)
974 gcc_assert (!cur_cfa->indirect);
976 /* Convert a change in args_size (always a positive in the
977 direction of stack growth) to a change in stack pointer. */
978 if (!STACK_GROWS_DOWNWARD)
979 delta = -delta;
981 cur_cfa->offset += delta;
985 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
986 data within the trace related to EH insns and args_size. */
988 static void
989 notice_eh_throw (rtx_insn *insn)
991 poly_int64 args_size = cur_trace->end_true_args_size;
992 if (cur_trace->eh_head == NULL)
994 cur_trace->eh_head = insn;
995 cur_trace->beg_delay_args_size = args_size;
996 cur_trace->end_delay_args_size = args_size;
998 else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
1000 cur_trace->end_delay_args_size = args_size;
1002 /* ??? If the CFA is the stack pointer, search backward for the last
1003 CFI note and insert there. Given that the stack changed for the
1004 args_size change, there *must* be such a note in between here and
1005 the last eh insn. */
1006 add_cfi_args_size (args_size);
1010 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1011 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1012 used in places where rtl is prohibited. */
1014 static inline unsigned
1015 dwf_regno (const_rtx reg)
1017 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
1018 return DWARF_FRAME_REGNUM (REGNO (reg));
1021 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1023 static bool
1024 compare_reg_or_pc (rtx x, rtx y)
1026 if (REG_P (x) && REG_P (y))
1027 return REGNO (x) == REGNO (y);
1028 return x == y;
1031 /* Record SRC as being saved in DEST. DEST may be null to delete an
1032 existing entry. SRC may be a register or PC_RTX. */
1034 static void
1035 record_reg_saved_in_reg (rtx dest, rtx src)
1037 reg_saved_in_data *elt;
1038 size_t i;
1040 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1041 if (compare_reg_or_pc (elt->orig_reg, src))
1043 if (dest == NULL)
1044 cur_trace->regs_saved_in_regs.unordered_remove (i);
1045 else
1046 elt->saved_in_reg = dest;
1047 return;
1050 if (dest == NULL)
1051 return;
1053 reg_saved_in_data e = {src, dest};
1054 cur_trace->regs_saved_in_regs.safe_push (e);
1057 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1058 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1060 static void
1061 queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
1063 queued_reg_save *q;
1064 queued_reg_save e = {reg, sreg, offset};
1065 size_t i;
1067 /* Duplicates waste space, but it's also necessary to remove them
1068 for correctness, since the queue gets output in reverse order. */
1069 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1070 if (compare_reg_or_pc (q->reg, reg))
1072 *q = e;
1073 return;
1076 queued_reg_saves.safe_push (e);
1079 /* Output all the entries in QUEUED_REG_SAVES. */
1081 static void
1082 dwarf2out_flush_queued_reg_saves (void)
1084 queued_reg_save *q;
1085 size_t i;
1087 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1089 unsigned int reg, sreg;
1091 record_reg_saved_in_reg (q->saved_reg, q->reg);
1093 if (q->reg == pc_rtx)
1094 reg = DWARF_FRAME_RETURN_COLUMN;
1095 else
1096 reg = dwf_regno (q->reg);
1097 if (q->saved_reg)
1098 sreg = dwf_regno (q->saved_reg);
1099 else
1100 sreg = INVALID_REGNUM;
1101 reg_save (reg, sreg, q->cfa_offset);
1104 queued_reg_saves.truncate (0);
1107 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1108 location for? Or, does it clobber a register which we've previously
1109 said that some other register is saved in, and for which we now
1110 have a new location for? */
1112 static bool
1113 clobbers_queued_reg_save (const_rtx insn)
1115 queued_reg_save *q;
1116 size_t iq;
1118 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1120 size_t ir;
1121 reg_saved_in_data *rir;
1123 if (modified_in_p (q->reg, insn))
1124 return true;
1126 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1127 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1128 && modified_in_p (rir->saved_in_reg, insn))
1129 return true;
1132 return false;
1135 /* What register, if any, is currently saved in REG? */
1137 static rtx
1138 reg_saved_in (rtx reg)
1140 unsigned int regn = REGNO (reg);
1141 queued_reg_save *q;
1142 reg_saved_in_data *rir;
1143 size_t i;
1145 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1146 if (q->saved_reg && regn == REGNO (q->saved_reg))
1147 return q->reg;
1149 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1150 if (regn == REGNO (rir->saved_in_reg))
1151 return rir->orig_reg;
1153 return NULL_RTX;
1156 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1158 static void
1159 dwarf2out_frame_debug_def_cfa (rtx pat)
1161 memset (cur_cfa, 0, sizeof (*cur_cfa));
1163 pat = strip_offset (pat, &cur_cfa->offset);
1164 if (MEM_P (pat))
1166 cur_cfa->indirect = 1;
1167 pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
1169 /* ??? If this fails, we could be calling into the _loc functions to
1170 define a full expression. So far no port does that. */
1171 gcc_assert (REG_P (pat));
1172 cur_cfa->reg = dwf_regno (pat);
1175 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1177 static void
1178 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1180 rtx src, dest;
1182 gcc_assert (GET_CODE (pat) == SET);
1183 dest = XEXP (pat, 0);
1184 src = XEXP (pat, 1);
1186 switch (GET_CODE (src))
1188 case PLUS:
1189 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1190 cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
1191 break;
1193 case REG:
1194 break;
1196 default:
1197 gcc_unreachable ();
1200 cur_cfa->reg = dwf_regno (dest);
1201 gcc_assert (cur_cfa->indirect == 0);
1204 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1206 static void
1207 dwarf2out_frame_debug_cfa_offset (rtx set)
1209 poly_int64 offset;
1210 rtx src, addr, span;
1211 unsigned int sregno;
1213 src = XEXP (set, 1);
1214 addr = XEXP (set, 0);
1215 gcc_assert (MEM_P (addr));
1216 addr = XEXP (addr, 0);
1218 /* As documented, only consider extremely simple addresses. */
1219 switch (GET_CODE (addr))
1221 case REG:
1222 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1223 offset = -cur_cfa->offset;
1224 break;
1225 case PLUS:
1226 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1227 offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
1228 break;
1229 default:
1230 gcc_unreachable ();
1233 if (src == pc_rtx)
1235 span = NULL;
1236 sregno = DWARF_FRAME_RETURN_COLUMN;
1238 else
1240 span = targetm.dwarf_register_span (src);
1241 sregno = dwf_regno (src);
1244 /* ??? We'd like to use queue_reg_save, but we need to come up with
1245 a different flushing heuristic for epilogues. */
1246 if (!span)
1247 reg_save (sregno, INVALID_REGNUM, offset);
1248 else
1250 /* We have a PARALLEL describing where the contents of SRC live.
1251 Adjust the offset for each piece of the PARALLEL. */
1252 poly_int64 span_offset = offset;
1254 gcc_assert (GET_CODE (span) == PARALLEL);
1256 const int par_len = XVECLEN (span, 0);
1257 for (int par_index = 0; par_index < par_len; par_index++)
1259 rtx elem = XVECEXP (span, 0, par_index);
1260 sregno = dwf_regno (src);
1261 reg_save (sregno, INVALID_REGNUM, span_offset);
1262 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1267 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1269 static void
1270 dwarf2out_frame_debug_cfa_register (rtx set)
1272 rtx src, dest;
1273 unsigned sregno, dregno;
1275 src = XEXP (set, 1);
1276 dest = XEXP (set, 0);
1278 record_reg_saved_in_reg (dest, src);
1279 if (src == pc_rtx)
1280 sregno = DWARF_FRAME_RETURN_COLUMN;
1281 else
1282 sregno = dwf_regno (src);
1284 dregno = dwf_regno (dest);
1286 /* ??? We'd like to use queue_reg_save, but we need to come up with
1287 a different flushing heuristic for epilogues. */
1288 reg_save (sregno, dregno, 0);
1291 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1293 static void
1294 dwarf2out_frame_debug_cfa_expression (rtx set)
1296 rtx src, dest, span;
1297 dw_cfi_ref cfi = new_cfi ();
1298 unsigned regno;
1300 dest = SET_DEST (set);
1301 src = SET_SRC (set);
1303 gcc_assert (REG_P (src));
1304 gcc_assert (MEM_P (dest));
1306 span = targetm.dwarf_register_span (src);
1307 gcc_assert (!span);
1309 regno = dwf_regno (src);
1311 cfi->dw_cfi_opc = DW_CFA_expression;
1312 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1313 cfi->dw_cfi_oprnd2.dw_cfi_loc
1314 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1315 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1317 /* ??? We'd like to use queue_reg_save, were the interface different,
1318 and, as above, we could manage flushing for epilogues. */
1319 add_cfi (cfi);
1320 update_row_reg_save (cur_row, regno, cfi);
1323 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1324 note. */
1326 static void
1327 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1329 rtx dest = SET_DEST (set);
1330 gcc_assert (REG_P (dest));
1332 rtx span = targetm.dwarf_register_span (dest);
1333 gcc_assert (!span);
1335 rtx src = SET_SRC (set);
1336 dw_cfi_ref cfi = new_cfi ();
1337 cfi->dw_cfi_opc = DW_CFA_val_expression;
1338 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1339 cfi->dw_cfi_oprnd2.dw_cfi_loc
1340 = mem_loc_descriptor (src, GET_MODE (src),
1341 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1342 add_cfi (cfi);
1343 update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1346 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1348 static void
1349 dwarf2out_frame_debug_cfa_restore (rtx reg)
1351 gcc_assert (REG_P (reg));
1353 rtx span = targetm.dwarf_register_span (reg);
1354 if (!span)
1356 unsigned int regno = dwf_regno (reg);
1357 add_cfi_restore (regno);
1358 update_row_reg_save (cur_row, regno, NULL);
1360 else
1362 /* We have a PARALLEL describing where the contents of REG live.
1363 Restore the register for each piece of the PARALLEL. */
1364 gcc_assert (GET_CODE (span) == PARALLEL);
1366 const int par_len = XVECLEN (span, 0);
1367 for (int par_index = 0; par_index < par_len; par_index++)
1369 reg = XVECEXP (span, 0, par_index);
1370 gcc_assert (REG_P (reg));
1371 unsigned int regno = dwf_regno (reg);
1372 add_cfi_restore (regno);
1373 update_row_reg_save (cur_row, regno, NULL);
1378 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1380 ??? Perhaps we should note in the CIE where windows are saved (instead
1381 of assuming 0(cfa)) and what registers are in the window. */
1383 static void
1384 dwarf2out_frame_debug_cfa_window_save (void)
1386 dw_cfi_ref cfi = new_cfi ();
1388 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1389 add_cfi (cfi);
1390 cur_row->window_save = true;
1393 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_TOGGLE_RA_MANGLE.
1394 Note: DW_CFA_GNU_window_save dwarf opcode is reused for toggling RA mangle
1395 state, this is a target specific operation on AArch64 and can only be used
1396 on other targets if they don't use the window save operation otherwise. */
1398 static void
1399 dwarf2out_frame_debug_cfa_toggle_ra_mangle (void)
1401 dw_cfi_ref cfi = new_cfi ();
1403 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1404 add_cfi (cfi);
1405 cur_row->ra_mangled = !cur_row->ra_mangled;
1408 /* Record call frame debugging information for an expression EXPR,
1409 which either sets SP or FP (adjusting how we calculate the frame
1410 address) or saves a register to the stack or another register.
1411 LABEL indicates the address of EXPR.
1413 This function encodes a state machine mapping rtxes to actions on
1414 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1415 users need not read the source code.
1417 The High-Level Picture
1419 Changes in the register we use to calculate the CFA: Currently we
1420 assume that if you copy the CFA register into another register, we
1421 should take the other one as the new CFA register; this seems to
1422 work pretty well. If it's wrong for some target, it's simple
1423 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1425 Changes in the register we use for saving registers to the stack:
1426 This is usually SP, but not always. Again, we deduce that if you
1427 copy SP into another register (and SP is not the CFA register),
1428 then the new register is the one we will be using for register
1429 saves. This also seems to work.
1431 Register saves: There's not much guesswork about this one; if
1432 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1433 register save, and the register used to calculate the destination
1434 had better be the one we think we're using for this purpose.
1435 It's also assumed that a copy from a call-saved register to another
1436 register is saving that register if RTX_FRAME_RELATED_P is set on
1437 that instruction. If the copy is from a call-saved register to
1438 the *same* register, that means that the register is now the same
1439 value as in the caller.
1441 Except: If the register being saved is the CFA register, and the
1442 offset is nonzero, we are saving the CFA, so we assume we have to
1443 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1444 the intent is to save the value of SP from the previous frame.
1446 In addition, if a register has previously been saved to a different
1447 register,
1449 Invariants / Summaries of Rules
1451 cfa current rule for calculating the CFA. It usually
1452 consists of a register and an offset. This is
1453 actually stored in *cur_cfa, but abbreviated
1454 for the purposes of this documentation.
1455 cfa_store register used by prologue code to save things to the stack
1456 cfa_store.offset is the offset from the value of
1457 cfa_store.reg to the actual CFA
1458 cfa_temp register holding an integral value. cfa_temp.offset
1459 stores the value, which will be used to adjust the
1460 stack pointer. cfa_temp is also used like cfa_store,
1461 to track stores to the stack via fp or a temp reg.
1463 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1464 with cfa.reg as the first operand changes the cfa.reg and its
1465 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1466 cfa_temp.offset.
1468 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1469 expression yielding a constant. This sets cfa_temp.reg
1470 and cfa_temp.offset.
1472 Rule 5: Create a new register cfa_store used to save items to the
1473 stack.
1475 Rules 10-14: Save a register to the stack. Define offset as the
1476 difference of the original location and cfa_store's
1477 location (or cfa_temp's location if cfa_temp is used).
1479 Rules 16-20: If AND operation happens on sp in prologue, we assume
1480 stack is realigned. We will use a group of DW_OP_XXX
1481 expressions to represent the location of the stored
1482 register instead of CFA+offset.
1484 The Rules
1486 "{a,b}" indicates a choice of a xor b.
1487 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1489 Rule 1:
1490 (set <reg1> <reg2>:cfa.reg)
1491 effects: cfa.reg = <reg1>
1492 cfa.offset unchanged
1493 cfa_temp.reg = <reg1>
1494 cfa_temp.offset = cfa.offset
1496 Rule 2:
1497 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1498 {<const_int>,<reg>:cfa_temp.reg}))
1499 effects: cfa.reg = sp if fp used
1500 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1501 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1502 if cfa_store.reg==sp
1504 Rule 3:
1505 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1506 effects: cfa.reg = fp
1507 cfa_offset += +/- <const_int>
1509 Rule 4:
1510 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1511 constraints: <reg1> != fp
1512 <reg1> != sp
1513 effects: cfa.reg = <reg1>
1514 cfa_temp.reg = <reg1>
1515 cfa_temp.offset = cfa.offset
1517 Rule 5:
1518 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1519 constraints: <reg1> != fp
1520 <reg1> != sp
1521 effects: cfa_store.reg = <reg1>
1522 cfa_store.offset = cfa.offset - cfa_temp.offset
1524 Rule 6:
1525 (set <reg> <const_int>)
1526 effects: cfa_temp.reg = <reg>
1527 cfa_temp.offset = <const_int>
1529 Rule 7:
1530 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1531 effects: cfa_temp.reg = <reg1>
1532 cfa_temp.offset |= <const_int>
1534 Rule 8:
1535 (set <reg> (high <exp>))
1536 effects: none
1538 Rule 9:
1539 (set <reg> (lo_sum <exp> <const_int>))
1540 effects: cfa_temp.reg = <reg>
1541 cfa_temp.offset = <const_int>
1543 Rule 10:
1544 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1545 effects: cfa_store.offset -= <const_int>
1546 cfa.offset = cfa_store.offset if cfa.reg == sp
1547 cfa.reg = sp
1548 cfa.base_offset = -cfa_store.offset
1550 Rule 11:
1551 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1552 effects: cfa_store.offset += -/+ mode_size(mem)
1553 cfa.offset = cfa_store.offset if cfa.reg == sp
1554 cfa.reg = sp
1555 cfa.base_offset = -cfa_store.offset
1557 Rule 12:
1558 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1560 <reg2>)
1561 effects: cfa.reg = <reg1>
1562 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1564 Rule 13:
1565 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1566 effects: cfa.reg = <reg1>
1567 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1569 Rule 14:
1570 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1571 effects: cfa.reg = <reg1>
1572 cfa.base_offset = -cfa_temp.offset
1573 cfa_temp.offset -= mode_size(mem)
1575 Rule 15:
1576 (set <reg> {unspec, unspec_volatile})
1577 effects: target-dependent
1579 Rule 16:
1580 (set sp (and: sp <const_int>))
1581 constraints: cfa_store.reg == sp
1582 effects: cfun->fde.stack_realign = 1
1583 cfa_store.offset = 0
1584 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1586 Rule 17:
1587 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1588 effects: cfa_store.offset += -/+ mode_size(mem)
1590 Rule 18:
1591 (set (mem ({pre_inc, pre_dec} sp)) fp)
1592 constraints: fde->stack_realign == 1
1593 effects: cfa_store.offset = 0
1594 cfa.reg != HARD_FRAME_POINTER_REGNUM
1596 Rule 19:
1597 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1598 constraints: fde->stack_realign == 1
1599 && cfa.offset == 0
1600 && cfa.indirect == 0
1601 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1602 effects: Use DW_CFA_def_cfa_expression to define cfa
1603 cfa.reg == fde->drap_reg */
1605 static void
1606 dwarf2out_frame_debug_expr (rtx expr)
1608 rtx src, dest, span;
1609 poly_int64 offset;
1610 dw_fde_ref fde;
1612 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1613 the PARALLEL independently. The first element is always processed if
1614 it is a SET. This is for backward compatibility. Other elements
1615 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1616 flag is set in them. */
1617 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1619 int par_index;
1620 int limit = XVECLEN (expr, 0);
1621 rtx elem;
1623 /* PARALLELs have strict read-modify-write semantics, so we
1624 ought to evaluate every rvalue before changing any lvalue.
1625 It's cumbersome to do that in general, but there's an
1626 easy approximation that is enough for all current users:
1627 handle register saves before register assignments. */
1628 if (GET_CODE (expr) == PARALLEL)
1629 for (par_index = 0; par_index < limit; par_index++)
1631 elem = XVECEXP (expr, 0, par_index);
1632 if (GET_CODE (elem) == SET
1633 && MEM_P (SET_DEST (elem))
1634 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1635 dwarf2out_frame_debug_expr (elem);
1638 for (par_index = 0; par_index < limit; par_index++)
1640 elem = XVECEXP (expr, 0, par_index);
1641 if (GET_CODE (elem) == SET
1642 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1643 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1644 dwarf2out_frame_debug_expr (elem);
1646 return;
1649 gcc_assert (GET_CODE (expr) == SET);
1651 src = SET_SRC (expr);
1652 dest = SET_DEST (expr);
1654 if (REG_P (src))
1656 rtx rsi = reg_saved_in (src);
1657 if (rsi)
1658 src = rsi;
1661 fde = cfun->fde;
1663 switch (GET_CODE (dest))
1665 case REG:
1666 switch (GET_CODE (src))
1668 /* Setting FP from SP. */
1669 case REG:
1670 if (cur_cfa->reg == dwf_regno (src))
1672 /* Rule 1 */
1673 /* Update the CFA rule wrt SP or FP. Make sure src is
1674 relative to the current CFA register.
1676 We used to require that dest be either SP or FP, but the
1677 ARM copies SP to a temporary register, and from there to
1678 FP. So we just rely on the backends to only set
1679 RTX_FRAME_RELATED_P on appropriate insns. */
1680 cur_cfa->reg = dwf_regno (dest);
1681 cur_trace->cfa_temp.reg = cur_cfa->reg;
1682 cur_trace->cfa_temp.offset = cur_cfa->offset;
1684 else
1686 /* Saving a register in a register. */
1687 gcc_assert (!fixed_regs [REGNO (dest)]
1688 /* For the SPARC and its register window. */
1689 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1691 /* After stack is aligned, we can only save SP in FP
1692 if drap register is used. In this case, we have
1693 to restore stack pointer with the CFA value and we
1694 don't generate this DWARF information. */
1695 if (fde
1696 && fde->stack_realign
1697 && REGNO (src) == STACK_POINTER_REGNUM)
1699 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1700 && fde->drap_reg != INVALID_REGNUM
1701 && cur_cfa->reg != dwf_regno (src)
1702 && fde->rule18);
1703 fde->rule18 = 0;
1704 /* The save of hard frame pointer has been deferred
1705 until this point when Rule 18 applied. Emit it now. */
1706 queue_reg_save (dest, NULL_RTX, 0);
1707 /* And as the instruction modifies the hard frame pointer,
1708 flush the queue as well. */
1709 dwarf2out_flush_queued_reg_saves ();
1711 else
1712 queue_reg_save (src, dest, 0);
1714 break;
1716 case PLUS:
1717 case MINUS:
1718 case LO_SUM:
1719 if (dest == stack_pointer_rtx)
1721 /* Rule 2 */
1722 /* Adjusting SP. */
1723 if (REG_P (XEXP (src, 1)))
1725 gcc_assert (dwf_regno (XEXP (src, 1))
1726 == cur_trace->cfa_temp.reg);
1727 offset = cur_trace->cfa_temp.offset;
1729 else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
1730 gcc_unreachable ();
1732 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1734 /* Restoring SP from FP in the epilogue. */
1735 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1736 cur_cfa->reg = dw_stack_pointer_regnum;
1738 else if (GET_CODE (src) == LO_SUM)
1739 /* Assume we've set the source reg of the LO_SUM from sp. */
1741 else
1742 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1744 if (GET_CODE (src) != MINUS)
1745 offset = -offset;
1746 if (cur_cfa->reg == dw_stack_pointer_regnum)
1747 cur_cfa->offset += offset;
1748 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1749 cur_trace->cfa_store.offset += offset;
1751 else if (dest == hard_frame_pointer_rtx)
1753 /* Rule 3 */
1754 /* Either setting the FP from an offset of the SP,
1755 or adjusting the FP */
1756 gcc_assert (frame_pointer_needed);
1758 gcc_assert (REG_P (XEXP (src, 0))
1759 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1760 offset = rtx_to_poly_int64 (XEXP (src, 1));
1761 if (GET_CODE (src) != MINUS)
1762 offset = -offset;
1763 cur_cfa->offset += offset;
1764 cur_cfa->reg = dw_frame_pointer_regnum;
1766 else
1768 gcc_assert (GET_CODE (src) != MINUS);
1770 /* Rule 4 */
1771 if (REG_P (XEXP (src, 0))
1772 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1773 && poly_int_rtx_p (XEXP (src, 1), &offset))
1775 /* Setting a temporary CFA register that will be copied
1776 into the FP later on. */
1777 offset = -offset;
1778 cur_cfa->offset += offset;
1779 cur_cfa->reg = dwf_regno (dest);
1780 /* Or used to save regs to the stack. */
1781 cur_trace->cfa_temp.reg = cur_cfa->reg;
1782 cur_trace->cfa_temp.offset = cur_cfa->offset;
1785 /* Rule 5 */
1786 else if (REG_P (XEXP (src, 0))
1787 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1788 && XEXP (src, 1) == stack_pointer_rtx)
1790 /* Setting a scratch register that we will use instead
1791 of SP for saving registers to the stack. */
1792 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1793 cur_trace->cfa_store.reg = dwf_regno (dest);
1794 cur_trace->cfa_store.offset
1795 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1798 /* Rule 9 */
1799 else if (GET_CODE (src) == LO_SUM
1800 && poly_int_rtx_p (XEXP (src, 1),
1801 &cur_trace->cfa_temp.offset))
1802 cur_trace->cfa_temp.reg = dwf_regno (dest);
1803 else
1804 gcc_unreachable ();
1806 break;
1808 /* Rule 6 */
1809 case CONST_INT:
1810 case CONST_POLY_INT:
1811 cur_trace->cfa_temp.reg = dwf_regno (dest);
1812 cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
1813 break;
1815 /* Rule 7 */
1816 case IOR:
1817 gcc_assert (REG_P (XEXP (src, 0))
1818 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1819 && CONST_INT_P (XEXP (src, 1)));
1821 cur_trace->cfa_temp.reg = dwf_regno (dest);
1822 if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
1823 &cur_trace->cfa_temp.offset))
1824 /* The target shouldn't generate this kind of CFI note if we
1825 can't represent it. */
1826 gcc_unreachable ();
1827 break;
1829 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1830 which will fill in all of the bits. */
1831 /* Rule 8 */
1832 case HIGH:
1833 break;
1835 /* Rule 15 */
1836 case UNSPEC:
1837 case UNSPEC_VOLATILE:
1838 /* All unspecs should be represented by REG_CFA_* notes. */
1839 gcc_unreachable ();
1840 return;
1842 /* Rule 16 */
1843 case AND:
1844 /* If this AND operation happens on stack pointer in prologue,
1845 we assume the stack is realigned and we extract the
1846 alignment. */
1847 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1849 /* We interpret reg_save differently with stack_realign set.
1850 Thus we must flush whatever we have queued first. */
1851 dwarf2out_flush_queued_reg_saves ();
1853 gcc_assert (cur_trace->cfa_store.reg
1854 == dwf_regno (XEXP (src, 0)));
1855 fde->stack_realign = 1;
1856 fde->stack_realignment = INTVAL (XEXP (src, 1));
1857 cur_trace->cfa_store.offset = 0;
1859 if (cur_cfa->reg != dw_stack_pointer_regnum
1860 && cur_cfa->reg != dw_frame_pointer_regnum)
1861 fde->drap_reg = cur_cfa->reg;
1863 return;
1865 default:
1866 gcc_unreachable ();
1868 break;
1870 case MEM:
1872 /* Saving a register to the stack. Make sure dest is relative to the
1873 CFA register. */
1874 switch (GET_CODE (XEXP (dest, 0)))
1876 /* Rule 10 */
1877 /* With a push. */
1878 case PRE_MODIFY:
1879 case POST_MODIFY:
1880 /* We can't handle variable size modifications. */
1881 offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1883 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1884 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1886 cur_trace->cfa_store.offset += offset;
1887 if (cur_cfa->reg == dw_stack_pointer_regnum)
1888 cur_cfa->offset = cur_trace->cfa_store.offset;
1890 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1891 offset -= cur_trace->cfa_store.offset;
1892 else
1893 offset = -cur_trace->cfa_store.offset;
1894 break;
1896 /* Rule 11 */
1897 case PRE_INC:
1898 case PRE_DEC:
1899 case POST_DEC:
1900 offset = GET_MODE_SIZE (GET_MODE (dest));
1901 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1902 offset = -offset;
1904 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1905 == STACK_POINTER_REGNUM)
1906 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1908 cur_trace->cfa_store.offset += offset;
1910 /* Rule 18: If stack is aligned, we will use FP as a
1911 reference to represent the address of the stored
1912 regiser. */
1913 if (fde
1914 && fde->stack_realign
1915 && REG_P (src)
1916 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1918 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1919 cur_trace->cfa_store.offset = 0;
1920 fde->rule18 = 1;
1923 if (cur_cfa->reg == dw_stack_pointer_regnum)
1924 cur_cfa->offset = cur_trace->cfa_store.offset;
1926 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1927 offset += -cur_trace->cfa_store.offset;
1928 else
1929 offset = -cur_trace->cfa_store.offset;
1930 break;
1932 /* Rule 12 */
1933 /* With an offset. */
1934 case PLUS:
1935 case MINUS:
1936 case LO_SUM:
1938 unsigned int regno;
1940 gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
1941 offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
1942 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1943 offset = -offset;
1945 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1947 if (cur_cfa->reg == regno)
1948 offset -= cur_cfa->offset;
1949 else if (cur_trace->cfa_store.reg == regno)
1950 offset -= cur_trace->cfa_store.offset;
1951 else
1953 gcc_assert (cur_trace->cfa_temp.reg == regno);
1954 offset -= cur_trace->cfa_temp.offset;
1957 break;
1959 /* Rule 13 */
1960 /* Without an offset. */
1961 case REG:
1963 unsigned int regno = dwf_regno (XEXP (dest, 0));
1965 if (cur_cfa->reg == regno)
1966 offset = -cur_cfa->offset;
1967 else if (cur_trace->cfa_store.reg == regno)
1968 offset = -cur_trace->cfa_store.offset;
1969 else
1971 gcc_assert (cur_trace->cfa_temp.reg == regno);
1972 offset = -cur_trace->cfa_temp.offset;
1975 break;
1977 /* Rule 14 */
1978 case POST_INC:
1979 gcc_assert (cur_trace->cfa_temp.reg
1980 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1981 offset = -cur_trace->cfa_temp.offset;
1982 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1983 break;
1985 default:
1986 gcc_unreachable ();
1989 /* Rule 17 */
1990 /* If the source operand of this MEM operation is a memory,
1991 we only care how much stack grew. */
1992 if (MEM_P (src))
1993 break;
1995 if (REG_P (src)
1996 && REGNO (src) != STACK_POINTER_REGNUM
1997 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1998 && dwf_regno (src) == cur_cfa->reg)
2000 /* We're storing the current CFA reg into the stack. */
2002 if (known_eq (cur_cfa->offset, 0))
2004 /* Rule 19 */
2005 /* If stack is aligned, putting CFA reg into stack means
2006 we can no longer use reg + offset to represent CFA.
2007 Here we use DW_CFA_def_cfa_expression instead. The
2008 result of this expression equals to the original CFA
2009 value. */
2010 if (fde
2011 && fde->stack_realign
2012 && cur_cfa->indirect == 0
2013 && cur_cfa->reg != dw_frame_pointer_regnum)
2015 gcc_assert (fde->drap_reg == cur_cfa->reg);
2017 cur_cfa->indirect = 1;
2018 cur_cfa->reg = dw_frame_pointer_regnum;
2019 cur_cfa->base_offset = offset;
2020 cur_cfa->offset = 0;
2022 fde->drap_reg_saved = 1;
2023 break;
2026 /* If the source register is exactly the CFA, assume
2027 we're saving SP like any other register; this happens
2028 on the ARM. */
2029 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2030 break;
2032 else
2034 /* Otherwise, we'll need to look in the stack to
2035 calculate the CFA. */
2036 rtx x = XEXP (dest, 0);
2038 if (!REG_P (x))
2039 x = XEXP (x, 0);
2040 gcc_assert (REG_P (x));
2042 cur_cfa->reg = dwf_regno (x);
2043 cur_cfa->base_offset = offset;
2044 cur_cfa->indirect = 1;
2045 break;
2049 if (REG_P (src))
2050 span = targetm.dwarf_register_span (src);
2051 else
2052 span = NULL;
2054 if (!span)
2056 if (fde->rule18)
2057 /* Just verify the hard frame pointer save when doing dynamic
2058 realignment uses expected offset. The actual queue_reg_save
2059 needs to be deferred until the instruction that sets
2060 hard frame pointer to stack pointer, see PR99334 for
2061 details. */
2062 gcc_assert (known_eq (offset, 0));
2063 else
2064 queue_reg_save (src, NULL_RTX, offset);
2066 else
2068 /* We have a PARALLEL describing where the contents of SRC live.
2069 Queue register saves for each piece of the PARALLEL. */
2070 poly_int64 span_offset = offset;
2072 gcc_assert (GET_CODE (span) == PARALLEL);
2074 const int par_len = XVECLEN (span, 0);
2075 for (int par_index = 0; par_index < par_len; par_index++)
2077 rtx elem = XVECEXP (span, 0, par_index);
2078 queue_reg_save (elem, NULL_RTX, span_offset);
2079 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2082 break;
2084 default:
2085 gcc_unreachable ();
2089 /* Record call frame debugging information for INSN, which either sets
2090 SP or FP (adjusting how we calculate the frame address) or saves a
2091 register to the stack. */
2093 static void
2094 dwarf2out_frame_debug (rtx_insn *insn)
2096 rtx note, n, pat;
2097 bool handled_one = false;
2099 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2100 switch (REG_NOTE_KIND (note))
2102 case REG_FRAME_RELATED_EXPR:
2103 pat = XEXP (note, 0);
2104 goto do_frame_expr;
2106 case REG_CFA_DEF_CFA:
2107 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2108 handled_one = true;
2109 break;
2111 case REG_CFA_ADJUST_CFA:
2112 n = XEXP (note, 0);
2113 if (n == NULL)
2115 n = PATTERN (insn);
2116 if (GET_CODE (n) == PARALLEL)
2117 n = XVECEXP (n, 0, 0);
2119 dwarf2out_frame_debug_adjust_cfa (n);
2120 handled_one = true;
2121 break;
2123 case REG_CFA_OFFSET:
2124 n = XEXP (note, 0);
2125 if (n == NULL)
2126 n = single_set (insn);
2127 dwarf2out_frame_debug_cfa_offset (n);
2128 handled_one = true;
2129 break;
2131 case REG_CFA_REGISTER:
2132 n = XEXP (note, 0);
2133 if (n == NULL)
2135 n = PATTERN (insn);
2136 if (GET_CODE (n) == PARALLEL)
2137 n = XVECEXP (n, 0, 0);
2139 dwarf2out_frame_debug_cfa_register (n);
2140 handled_one = true;
2141 break;
2143 case REG_CFA_EXPRESSION:
2144 case REG_CFA_VAL_EXPRESSION:
2145 n = XEXP (note, 0);
2146 if (n == NULL)
2147 n = single_set (insn);
2149 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2150 dwarf2out_frame_debug_cfa_expression (n);
2151 else
2152 dwarf2out_frame_debug_cfa_val_expression (n);
2154 handled_one = true;
2155 break;
2157 case REG_CFA_RESTORE:
2158 n = XEXP (note, 0);
2159 if (n == NULL)
2161 n = PATTERN (insn);
2162 if (GET_CODE (n) == PARALLEL)
2163 n = XVECEXP (n, 0, 0);
2164 n = XEXP (n, 0);
2166 dwarf2out_frame_debug_cfa_restore (n);
2167 handled_one = true;
2168 break;
2170 case REG_CFA_SET_VDRAP:
2171 n = XEXP (note, 0);
2172 if (REG_P (n))
2174 dw_fde_ref fde = cfun->fde;
2175 if (fde)
2177 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2178 if (REG_P (n))
2179 fde->vdrap_reg = dwf_regno (n);
2182 handled_one = true;
2183 break;
2185 case REG_CFA_TOGGLE_RA_MANGLE:
2186 dwarf2out_frame_debug_cfa_toggle_ra_mangle ();
2187 handled_one = true;
2188 break;
2190 case REG_CFA_WINDOW_SAVE:
2191 dwarf2out_frame_debug_cfa_window_save ();
2192 handled_one = true;
2193 break;
2195 case REG_CFA_FLUSH_QUEUE:
2196 /* The actual flush happens elsewhere. */
2197 handled_one = true;
2198 break;
2200 default:
2201 break;
2204 if (!handled_one)
2206 pat = PATTERN (insn);
2207 do_frame_expr:
2208 dwarf2out_frame_debug_expr (pat);
2210 /* Check again. A parallel can save and update the same register.
2211 We could probably check just once, here, but this is safer than
2212 removing the check at the start of the function. */
2213 if (clobbers_queued_reg_save (pat))
2214 dwarf2out_flush_queued_reg_saves ();
2218 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2220 static void
2221 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2223 size_t i, n_old, n_new, n_max;
2224 dw_cfi_ref cfi;
 /* First reconcile the CFA.  If the new row carries a prebuilt CFA cfi
    (e.g. a DW_CFA_def_cfa_expression), emit it whenever it differs from
    the old row's; otherwise let def_cfa_0 compute the minimal def_cfa
    opcode (if any) needed to move from the old CFA to the new one.  */
2226 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2227 add_cfi (new_row->cfa_cfi);
2228 else
2230 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2231 if (cfi)
2232 add_cfi (cfi);
 /* Walk the union of the two register-save vectors and emit only the
    differences: a restore when the register is no longer saved, or a
    fresh save cfi when the save location changed.  */
2235 n_old = vec_safe_length (old_row->reg_save);
2236 n_new = vec_safe_length (new_row->reg_save);
2237 n_max = MAX (n_old, n_new);
2239 for (i = 0; i < n_max; ++i)
2241 dw_cfi_ref r_old = NULL, r_new = NULL;
2243 if (i < n_old)
2244 r_old = (*old_row->reg_save)[i];
2245 if (i < n_new)
2246 r_new = (*new_row->reg_save)[i];
 /* Identical entries (including both NULL) need no opcode at all.  */
2248 if (r_old == r_new)
2250 else if (r_new == NULL)
2251 add_cfi_restore (i);
2252 else if (!cfi_equal_p (r_old, r_new))
2253 add_cfi (r_new);
 /* window_save and ra_mangled share the DW_CFA_GNU_window_save opcode,
    so the asserts below enforce that the two states are never active in
    the same function.  */
2256 if (!old_row->window_save && new_row->window_save)
2258 dw_cfi_ref cfi = new_cfi ();
2260 gcc_assert (!old_row->ra_mangled && !new_row->ra_mangled);
2261 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2262 add_cfi (cfi);
2265 if (old_row->ra_mangled != new_row->ra_mangled)
2267 dw_cfi_ref cfi = new_cfi ();
2269 gcc_assert (!old_row->window_save && !new_row->window_save);
2270 /* DW_CFA_GNU_window_save is reused for toggling RA mangle state. */
2271 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2272 add_cfi (cfi);
2276 /* Examine CFI and return true if a cfi label and set_loc is needed
2277 beforehand. Even when generating CFI assembler instructions, we
2278 still have to add the cfi to the list so that lookup_cfa_1 works
2279 later on. When -g2 and above we even need to force emitting of
2280 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2281 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2282 and so don't use convert_cfa_to_fb_loc_list. */
2284 static bool
2285 cfi_label_required_p (dw_cfi_ref cfi)
 /* Without .cfi_* directives every cfi needs an explicit label so that
    advance_loc deltas can be computed at assembly time.  */
2287 if (!dwarf2out_do_cfi_asm ())
2288 return true;
 /* With directives, labels are only needed for DWARF2 + full debug info,
    and then only for the opcodes that change the CFA rule (used later by
    convert_cfa_to_fb_loc_list).  */
2290 if (dwarf_version == 2
2291 && debug_info_level > DINFO_LEVEL_TERSE
2292 && dwarf_debuginfo_p ())
2294 switch (cfi->dw_cfi_opc)
2296 case DW_CFA_def_cfa_offset:
2297 case DW_CFA_def_cfa_offset_sf:
2298 case DW_CFA_def_cfa_register:
2299 case DW_CFA_def_cfa:
2300 case DW_CFA_def_cfa_sf:
2301 case DW_CFA_def_cfa_expression:
2302 case DW_CFA_restore_state:
2303 return true;
2304 default:
2305 return false;
2308 return false;
2311 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2312 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2313 necessary. */
2314 static void
2315 add_cfis_to_fde (void)
2317 dw_fde_ref fde = cfun->fde;
2318 rtx_insn *insn, *next;
2320 for (insn = get_insns (); insn; insn = next)
2322 next = NEXT_INSN (insn);
 /* Record where the second (cold/hot split) section starts so the
    FDE can be emitted in two pieces.  */
2324 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2325 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2327 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
 /* Scan forward over the whole run of consecutive CFI notes (stopping
    at the next active insn or section switch) to decide once whether
    this group needs a label; any member requiring one suffices.  */
2329 bool required = cfi_label_required_p (NOTE_CFI (insn));
2330 while (next)
2331 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2333 required |= cfi_label_required_p (NOTE_CFI (next));
2334 next = NEXT_INSN (next);
2336 else if (active_insn_p (next)
2337 || (NOTE_P (next) && (NOTE_KIND (next)
2338 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2339 break;
2340 else
2341 next = NEXT_INSN (next);
2342 if (required)
2344 int num = dwarf2out_cfi_label_num;
2345 const char *label = dwarf2out_cfi_label ();
2346 dw_cfi_ref xcfi;
2348 /* Set the location counter to the new label. */
2349 xcfi = new_cfi ();
 /* DW_CFA_advance_loc4 is a placeholder; final output may shrink it
    or turn it into a set_loc — see output_cfi.  */
2350 xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
2351 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2352 vec_safe_push (fde->dw_fde_cfi, xcfi);
2354 rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2355 NOTE_LABEL_NUMBER (tmp) = num;
 /* Now push every CFI note of the group onto the FDE's vector.  */
2360 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2361 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2362 insn = NEXT_INSN (insn);
2364 while (insn != next);
2369 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2371 /* If LABEL is the start of a trace, then initialize the state of that
2372 trace from CUR_TRACE and CUR_ROW. */
2374 static void
2375 maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
2377 dw_trace_info *ti;
2379 ti = get_trace_info (start);
2380 gcc_assert (ti != NULL);
2382 if (dump_file)
2384 fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
2385 cur_trace->id, ti->id,
2386 (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2387 (origin ? INSN_UID (origin) : 0));
2390 poly_int64 args_size = cur_trace->end_true_args_size;
2391 if (ti->beg_row == NULL)
2393 /* This is the first time we've encountered this trace. Propagate
2394 state across the edge and push the trace onto the work list. */
2395 ti->beg_row = copy_cfi_row (cur_row);
2396 ti->beg_true_args_size = args_size;
2398 ti->cfa_store = cur_trace->cfa_store;
2399 ti->cfa_temp = cur_trace->cfa_temp;
2400 ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
2402 trace_work_list.safe_push (ti);
2404 if (dump_file)
2405 fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2407 else
2410 /* We ought to have the same state incoming to a given trace no
2411 matter how we arrive at the trace. Anything else means we've
2412 got some kind of optimization error. */
2413 #if CHECKING_P
2414 if (!cfi_row_equal_p (cur_row, ti->beg_row))
2416 if (dump_file)
2418 fprintf (dump_file, "Inconsistent CFI state!\n");
2419 fprintf (dump_file, "SHOULD have:\n");
2420 dump_cfi_row (dump_file, ti->beg_row);
2421 fprintf (dump_file, "DO have:\n");
2422 dump_cfi_row (dump_file, cur_row);
2425 gcc_unreachable ();
2427 #endif
2429 /* The args_size is allowed to conflict if it isn't actually used. */
2430 if (maybe_ne (ti->beg_true_args_size, args_size))
2431 ti->args_size_undefined = true;
2435 /* Similarly, but handle the args_size and CFA reset across EH
2436 and non-local goto edges. */
2438 static void
2439 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2441 poly_int64 save_args_size, delta;
2442 dw_cfa_location save_cfa;
 /* Abnormal edges land with pending call arguments popped, so the
    target trace must be recorded with args_size 0.  If it already is
    zero there is nothing to adjust.  */
2444 save_args_size = cur_trace->end_true_args_size;
2445 if (known_eq (save_args_size, 0))
2447 maybe_record_trace_start (start, origin);
2448 return;
2451 delta = -save_args_size;
2452 cur_trace->end_true_args_size = 0;
2454 save_cfa = cur_row->cfa;
 /* If the CFA is still defined in terms of SP, the args_size change is
    also an SP change, so the CFA offset must be adjusted to match.  */
2455 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2457 /* Convert a change in args_size (always a positive in the
2458 direction of stack growth) to a change in stack pointer. */
2459 if (!STACK_GROWS_DOWNWARD)
2460 delta = -delta;
2462 cur_row->cfa.offset += delta;
2465 maybe_record_trace_start (start, origin);
 /* Restore the state temporarily modified above; the current trace's
    fallthrough path continues with the original args_size and CFA.  */
2467 cur_trace->end_true_args_size = save_args_size;
2468 cur_row->cfa = save_cfa;
2471 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2472 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2474 static void
2475 create_trace_edges (rtx_insn *insn)
2477 rtx tmp;
2478 int i, n;
2480 if (JUMP_P (insn))
2482 rtx_jump_table_data *table;
 /* Non-local gotos are handled from the CALL_P path via
    can_nonlocal_goto; nothing to do for the jump itself.  */
2484 if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2485 return;
 /* Tablejump: record an edge to every label in the dispatch table.  */
2487 if (tablejump_p (insn, NULL, &table))
2489 rtvec vec = table->get_labels ();
2491 n = GET_NUM_ELEM (vec);
2492 for (i = 0; i < n; ++i)
2494 rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
2495 maybe_record_trace_start (lab, insn);
2498 /* Handle casesi dispatch insns. */
2499 if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX)
2501 rtx_insn * lab = label_ref_label (XEXP (SET_SRC (tmp), 2));
2502 maybe_record_trace_start (lab, insn);
 /* Computed jump: conservatively add an edge to every forced label.  */
2505 else if (computed_jump_p (insn))
2507 rtx_insn *temp;
2508 unsigned int i;
2509 FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
2510 maybe_record_trace_start (temp, insn);
 /* Return jumps leave the function; no intra-function edge.  */
2512 else if (returnjump_p (insn))
 /* asm goto: edges to each of its label operands.  */
2514 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2516 n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2517 for (i = 0; i < n; ++i)
2519 rtx_insn *lab =
2520 as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
2521 maybe_record_trace_start (lab, insn);
 /* Otherwise a plain (conditional) jump with a single target.  */
2524 else
2526 rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
2527 gcc_assert (lab != NULL);
2528 maybe_record_trace_start (lab, insn);
2531 else if (CALL_P (insn))
2533 /* Sibling calls don't have edges inside this function. */
2534 if (SIBLING_CALL_P (insn))
2535 return;
2537 /* Process non-local goto edges. */
2538 if (can_nonlocal_goto (insn))
2539 for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
2540 lab;
2541 lab = lab->next ())
2542 maybe_record_trace_start_abnormal (lab->insn (), insn);
2544 else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
 /* A SEQUENCE (delay-slot group): recurse on each member and stop;
    the members themselves produce any EH edges.  */
2546 int i, n = seq->len ();
2547 for (i = 0; i < n; ++i)
2548 create_trace_edges (seq->insn (i));
2549 return;
2552 /* Process EH edges. */
2553 if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2555 eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2556 if (lp)
2557 maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2561 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2563 static void
2564 scan_insn_after (rtx_insn *insn)
 /* Interpret frame-related effects first, then any REG_ARGS_SIZE note,
    so that both kinds of CFA adjustment are accumulated before the
    caller's single def_cfa_1 call.  */
2566 if (RTX_FRAME_RELATED_P (insn))
2567 dwarf2out_frame_debug (insn);
2568 notice_args_size (insn);
2571 /* Scan the trace beginning at INSN and create the CFI notes for the
2572 instructions therein. */
2574 static void
2575 scan_trace (dw_trace_info *trace, bool entry)
2577 rtx_insn *prev, *insn = trace->head;
2578 dw_cfa_location this_cfa;
2580 if (dump_file)
2581 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2582 trace->id, rtx_name[(int) GET_CODE (insn)],
2583 INSN_UID (insn));
 /* Start the trace's end state as a copy of its recorded begin state;
    the loop below mutates it in place via cur_row/cur_cfa.  */
2585 trace->end_row = copy_cfi_row (trace->beg_row);
2586 trace->end_true_args_size = trace->beg_true_args_size;
2588 cur_trace = trace;
2589 cur_row = trace->end_row;
2591 this_cfa = cur_row->cfa;
2592 cur_cfa = &this_cfa;
2594 /* If the current function starts with a non-standard incoming frame
2595 sp offset, emit a note before the first instruction. */
2596 if (entry
2597 && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
2599 add_cfi_insn = insn;
2600 gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED)
2601 this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
2602 def_cfa_1 (&this_cfa);
2605 for (prev = insn, insn = NEXT_INSN (insn);
2606 insn;
2607 prev = insn, insn = NEXT_INSN (insn))
2609 rtx_insn *control;
2611 /* Do everything that happens "before" the insn. */
2612 add_cfi_insn = prev;
2614 /* Notice the end of a trace. */
2615 if (BARRIER_P (insn))
2617 /* Don't bother saving the unneeded queued registers at all. */
2618 queued_reg_saves.truncate (0);
2619 break;
2621 if (save_point_p (insn))
2623 /* Propagate across fallthru edges. */
2624 dwarf2out_flush_queued_reg_saves ();
2625 maybe_record_trace_start (insn, NULL);
2626 break;
2629 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2630 continue;
2632 /* Handle all changes to the row state. Sequences require special
2633 handling for the positioning of the notes. */
2634 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2636 rtx_insn *elt;
2637 int i, n = pat->len ();
2639 control = pat->insn (0);
2640 if (can_throw_internal (control))
2641 notice_eh_throw (control);
2642 dwarf2out_flush_queued_reg_saves ();
2644 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2646 /* ??? Hopefully multiple delay slots are not annulled. */
2647 gcc_assert (n == 2);
2648 gcc_assert (!RTX_FRAME_RELATED_P (control));
2649 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2651 elt = pat->insn (1);
2653 if (INSN_FROM_TARGET_P (elt))
2655 cfi_vec save_row_reg_save;
2657 /* If ELT is an instruction from target of an annulled
2658 branch, the effects are for the target only and so
2659 the args_size and CFA along the current path
2660 shouldn't change. */
2661 add_cfi_insn = NULL;
2662 poly_int64 restore_args_size = cur_trace->end_true_args_size;
2663 cur_cfa = &cur_row->cfa;
2664 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2666 scan_insn_after (elt);
2668 /* ??? Should we instead save the entire row state? */
2669 gcc_assert (!queued_reg_saves.length ());
2671 create_trace_edges (control);
 /* Undo ELT's effects on the fallthrough path: they applied only
    to the branch-taken edge recorded just above.  */
2673 cur_trace->end_true_args_size = restore_args_size;
2674 cur_row->cfa = this_cfa;
2675 cur_row->reg_save = save_row_reg_save;
2676 cur_cfa = &this_cfa;
2678 else
2680 /* If ELT is a annulled branch-taken instruction (i.e.
2681 executed only when branch is not taken), the args_size
2682 and CFA should not change through the jump. */
2683 create_trace_edges (control);
2685 /* Update and continue with the trace. */
2686 add_cfi_insn = insn;
2687 scan_insn_after (elt);
2688 def_cfa_1 (&this_cfa);
2690 continue;
2693 /* The insns in the delay slot should all be considered to happen
2694 "before" a call insn. Consider a call with a stack pointer
2695 adjustment in the delay slot. The backtrace from the callee
2696 should include the sp adjustment. Unfortunately, that leaves
2697 us with an unavoidable unwinding error exactly at the call insn
2698 itself. For jump insns we'd prefer to avoid this error by
2699 placing the notes after the sequence. */
2700 if (JUMP_P (control))
2701 add_cfi_insn = insn;
2703 for (i = 1; i < n; ++i)
2705 elt = pat->insn (i);
2706 scan_insn_after (elt);
2709 /* Make sure any register saves are visible at the jump target. */
2710 dwarf2out_flush_queued_reg_saves ();
2711 any_cfis_emitted = false;
2713 /* However, if there is some adjustment on the call itself, e.g.
2714 a call_pop, that action should be considered to happen after
2715 the call returns. */
2716 add_cfi_insn = insn;
2717 scan_insn_after (control);
2719 else
2721 /* Flush data before calls and jumps, and of course if necessary. */
2722 if (can_throw_internal (insn))
2724 notice_eh_throw (insn);
2725 dwarf2out_flush_queued_reg_saves ();
2727 else if (!NONJUMP_INSN_P (insn)
2728 || clobbers_queued_reg_save (insn)
2729 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2730 dwarf2out_flush_queued_reg_saves ();
2731 any_cfis_emitted = false;
2733 add_cfi_insn = insn;
2734 scan_insn_after (insn);
2735 control = insn;
2738 /* Between frame-related-p and args_size we might have otherwise
2739 emitted two cfa adjustments. Do it now. */
2740 def_cfa_1 (&this_cfa);
2742 /* Minimize the number of advances by emitting the entire queue
2743 once anything is emitted. */
2744 if (any_cfis_emitted
2745 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2746 dwarf2out_flush_queued_reg_saves ();
2748 /* Note that a test for control_flow_insn_p does exactly the
2749 same tests as are done to actually create the edges. So
2750 always call the routine and let it not create edges for
2751 non-control-flow insns. */
2752 create_trace_edges (control);
 /* Reset the module-level cursors so stale pointers cannot leak into
    the next trace's scan.  */
2755 gcc_assert (!cfun->fde || !cfun->fde->rule18);
2756 add_cfi_insn = NULL;
2757 cur_row = NULL;
2758 cur_trace = NULL;
2759 cur_cfa = NULL;
2762 /* Scan the function and create the initial set of CFI notes. */
2764 static void
2765 create_cfi_notes (void)
2767 dw_trace_info *ti;
2769 gcc_checking_assert (!queued_reg_saves.exists ());
2770 gcc_checking_assert (!trace_work_list.exists ());
2771 /* Always begin at the entry trace. */
2773 ti = &trace_info[0];
2774 scan_trace (ti, true);
 /* Worklist algorithm: scanning a trace pushes newly reached traces
    (see maybe_record_trace_start); drain until no trace is pending.  */
2776 while (!trace_work_list.is_empty ())
2778 ti = trace_work_list.pop ();
2779 scan_trace (ti, false);
2782 queued_reg_saves.release ();
2783 trace_work_list.release ();
2786 /* Return the insn before the first NOTE_INSN_CFI after START. */
2788 static rtx_insn *
2789 before_next_cfi_note (rtx_insn *start)
2791 rtx_insn *prev = start;
2792 while (start)
2794 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2795 return prev;
2796 prev = start;
2797 start = NEXT_INSN (start);
 /* The caller (connect_traces) only asks when a state change is known
    to exist after START, so running off the insn chain is a bug.  */
2799 gcc_unreachable ();
2802 /* Insert CFI notes between traces to properly change state between them. */
2804 static void
2805 connect_traces (void)
2807 unsigned i, n;
2808 dw_trace_info *prev_ti, *ti;
2810 /* ??? Ideally, we should have both queued and processed every trace.
2811 However the current representation of constant pools on various targets
2812 is indistinguishable from unreachable code. Assume for the moment that
2813 we can simply skip over such traces. */
2814 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2815 these are not "real" instructions, and should not be considered.
2816 This could be generically useful for tablejump data as well. */
2817 /* Remove all unprocessed traces from the list. */
2818 unsigned ix, ix2;
2819 VEC_ORDERED_REMOVE_IF_FROM_TO (trace_info, ix, ix2, ti, 1,
2820 trace_info.length (), ti->beg_row == NULL);
2821 FOR_EACH_VEC_ELT (trace_info, ix, ti)
2822 gcc_assert (ti->end_row != NULL);
2824 /* Work from the end back to the beginning. This lets us easily insert
2825 remember/restore_state notes in the correct order wrt other notes. */
2826 n = trace_info.length ();
2827 prev_ti = &trace_info[n - 1];
2828 for (i = n - 1; i > 0; --i)
2830 dw_cfi_row *old_row;
2832 ti = prev_ti;
2833 prev_ti = &trace_info[i - 1];
2835 add_cfi_insn = ti->head;
2837 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2838 for the portion of the function in the alternate text
2839 section. The row state at the very beginning of that
2840 new FDE will be exactly the row state from the CIE. */
2841 if (ti->switch_sections)
2842 old_row = cie_cfi_row;
2843 else
2845 old_row = prev_ti->end_row;
2846 /* If there's no change from the previous end state, fine. */
2847 if (cfi_row_equal_p (old_row, ti->beg_row))
2849 /* Otherwise check for the common case of sharing state with
2850 the beginning of an epilogue, but not the end. Insert
2851 remember/restore opcodes in that case. */
2852 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2854 dw_cfi_ref cfi;
2856 /* Note that if we blindly insert the remember at the
2857 start of the trace, we can wind up increasing the
2858 size of the unwind info due to extra advance opcodes.
2859 Instead, put the remember immediately before the next
2860 state change. We know there must be one, because the
2861 state at the beginning and head of the trace differ. */
2862 add_cfi_insn = before_next_cfi_note (prev_ti->head);
2863 cfi = new_cfi ();
2864 cfi->dw_cfi_opc = DW_CFA_remember_state;
2865 add_cfi (cfi);
2867 add_cfi_insn = ti->head;
2868 cfi = new_cfi ();
2869 cfi->dw_cfi_opc = DW_CFA_restore_state;
2870 add_cfi (cfi);
2872 /* If the target unwinder does not save the CFA as part of the
2873 register state, we need to restore it separately. */
2874 if (targetm.asm_out.should_restore_cfa_state ()
2875 && (cfi = def_cfa_0 (&old_row->cfa, &ti->beg_row->cfa)))
2876 add_cfi (cfi);
2878 old_row = prev_ti->beg_row;
2880 /* Otherwise, we'll simply change state from the previous end. */
 /* Emit the opcode stream taking OLD_ROW to this trace's begin row.  */
2883 change_cfi_row (old_row, ti->beg_row);
2885 if (dump_file && add_cfi_insn != ti->head)
2887 rtx_insn *note;
2889 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2890 prev_ti->id, ti->id);
2892 note = ti->head;
2895 note = NEXT_INSN (note);
2896 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2897 output_cfi_directive (dump_file, NOTE_CFI (note));
2899 while (note != add_cfi_insn);
2903 /* Connect args_size between traces that have can_throw_internal insns. */
2904 if (cfun->eh->lp_array)
2906 poly_int64 prev_args_size = 0;
2908 for (i = 0; i < n; ++i)
2910 ti = &trace_info[i];
 /* A new FDE (section switch) restarts args_size accounting.  */
2912 if (ti->switch_sections)
2913 prev_args_size = 0;
2915 if (ti->eh_head == NULL)
2916 continue;
2918 /* We require either the incoming args_size values to match or the
2919 presence of an insn setting it before the first EH insn. */
2920 gcc_assert (!ti->args_size_undefined || ti->args_size_defined_for_eh);
2922 /* In the latter case, we force the creation of a CFI note. */
2923 if (ti->args_size_undefined
2924 || maybe_ne (ti->beg_delay_args_size, prev_args_size))
2926 /* ??? Search back to previous CFI note. */
2927 add_cfi_insn = PREV_INSN (ti->eh_head);
2928 add_cfi_args_size (ti->beg_delay_args_size);
2931 prev_args_size = ti->end_delay_args_size;
2936 /* Set up the pseudo-cfg of instruction traces, as described at the
2937 block comment at the top of the file. */
2939 static void
2940 create_pseudo_cfg (void)
2942 bool saw_barrier, switch_sections;
2943 dw_trace_info ti;
2944 rtx_insn *insn;
2945 unsigned i;
2947 /* The first trace begins at the start of the function,
2948 and begins with the CIE row state. */
2949 trace_info.create (16);
2950 memset (&ti, 0, sizeof (ti));
2951 ti.head = get_insns ();
2952 ti.beg_row = cie_cfi_row;
2953 ti.cfa_store = cie_cfi_row->cfa;
2954 ti.cfa_temp.reg = INVALID_REGNUM;
2955 trace_info.quick_push (ti);
2957 if (cie_return_save)
2958 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2960 /* Walk all the insns, collecting start of trace locations. */
2961 saw_barrier = false;
2962 switch_sections = false;
2963 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2965 if (BARRIER_P (insn))
2966 saw_barrier = true;
2967 else if (NOTE_P (insn)
2968 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2970 /* We should have just seen a barrier. */
2971 gcc_assert (saw_barrier);
2972 switch_sections = true;
2974 /* Watch out for save_point notes between basic blocks.
2975 In particular, a note after a barrier. Do not record these,
2976 delaying trace creation until the label. */
2977 else if (save_point_p (insn)
2978 && (LABEL_P (insn) || !saw_barrier))
2980 memset (&ti, 0, sizeof (ti));
2981 ti.head = insn;
2982 ti.switch_sections = switch_sections;
2983 ti.id = trace_info.length ();
2984 trace_info.safe_push (ti);
 /* Both flags describe only the gap before this trace head;
    reset them for the next gap.  */
2986 saw_barrier = false;
2987 switch_sections = false;
2991 /* Create the trace index after we've finished building trace_info,
2992 avoiding stale pointer problems due to reallocation. */
2993 trace_index
2994 = new hash_table<trace_info_hasher> (trace_info.length ());
2995 dw_trace_info *tp;
2996 FOR_EACH_VEC_ELT (trace_info, i, tp)
2998 dw_trace_info **slot;
3000 if (dump_file)
3001 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
3002 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
3003 tp->switch_sections ? " (section switch)" : "");
 /* Traces are keyed by the UID of their head insn; duplicates would
    indicate two traces claiming the same start point.  */
3005 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
3006 gcc_assert (*slot == NULL);
3007 *slot = tp;
3011 /* Record the initial position of the return address. RTL is
3012 INCOMING_RETURN_ADDR_RTX. */
3014 static void
3015 initial_return_save (rtx rtl)
3017 unsigned int reg = INVALID_REGNUM;
3018 poly_int64 offset = 0;
3020 switch (GET_CODE (rtl))
3022 case REG:
3023 /* RA is in a register. */
3024 reg = dwf_regno (rtl);
3025 break;
3027 case MEM:
3028 /* RA is on the stack. */
3029 rtl = XEXP (rtl, 0);
 /* Decompose the address, which must be SP or SP plus/minus a
    constant; OFFSET is then relative to the incoming SP.  */
3030 switch (GET_CODE (rtl))
3032 case REG:
3033 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
3034 offset = 0;
3035 break;
3037 case PLUS:
3038 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
3039 offset = rtx_to_poly_int64 (XEXP (rtl, 1));
3040 break;
3042 case MINUS:
3043 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
3044 offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
3045 break;
3047 default:
3048 gcc_unreachable ();
3051 break;
3053 case PLUS:
3054 /* The return address is at some offset from any value we can
3055 actually load. For instance, on the SPARC it is in %i7+8. Just
3056 ignore the offset for now; it doesn't matter for unwinding frames. */
3057 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
3058 initial_return_save (XEXP (rtl, 0));
3059 return;
3061 default:
3062 gcc_unreachable ();
 /* Only record a save rule when the RA does not already live in its
    own return column; the offset is made CFA-relative here.  */
3065 if (reg != DWARF_FRAME_RETURN_COLUMN)
3067 if (reg != INVALID_REGNUM)
3068 record_reg_saved_in_reg (rtl, pc_rtx);
3069 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
 /* Build the CIE row state shared by every FDE in this translation
    unit: the incoming CFA rule and the initial return-address rule.
    Called once per TU from execute_dwarf2_frame.  */
3073 static void
3074 create_cie_data (void)
3076 dw_cfa_location loc;
3077 dw_trace_info cie_trace;
3079 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
3081 memset (&cie_trace, 0, sizeof (cie_trace));
3082 cur_trace = &cie_trace;
3084 add_cfi_vec = &cie_cfi_vec;
3085 cie_cfi_row = cur_row = new_cfi_row ();
3087 /* On entry, the Canonical Frame Address is at SP. */
3088 memset (&loc, 0, sizeof (loc));
3089 loc.reg = dw_stack_pointer_regnum;
3090 /* create_cie_data is called just once per TU, and when using .cfi_startproc
3091 is even done by the assembler rather than the compiler. If the target
3092 has different incoming frame sp offsets depending on what kind of
3093 function it is, use a single constant offset for the target and
3094 if needed, adjust before the first instruction in insn stream. */
3095 loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
3096 def_cfa_1 (&loc);
3098 if (targetm.debug_unwind_info () == UI_DWARF2
3099 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3101 initial_return_save (INCOMING_RETURN_ADDR_RTX);
3103 /* For a few targets, we have the return address incoming into a
3104 register, but choose a different return column. This will result
3105 in a DW_CFA_register for the return, and an entry in
3106 regs_saved_in_regs to match. If the target later stores that
3107 return address register to the stack, we want to be able to emit
3108 the DW_CFA_offset against the return column, not the intermediate
3109 save register. Save the contents of regs_saved_in_regs so that
3110 we can re-initialize it at the start of each function. */
3111 switch (cie_trace.regs_saved_in_regs.length ())
3113 case 0:
3114 break;
3115 case 1:
3116 cie_return_save = ggc_alloc<reg_saved_in_data> ();
3117 *cie_return_save = cie_trace.regs_saved_in_regs[0];
3118 cie_trace.regs_saved_in_regs.release ();
3119 break;
3120 default:
3121 gcc_unreachable ();
 /* Detach the scratch CIE trace/row from the module-level cursors.  */
3125 add_cfi_vec = NULL;
3126 cur_row = NULL;
3127 cur_trace = NULL;
3130 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3131 state at each location within the function. These notes will be
3132 emitted during pass_final. */
3134 static unsigned int
3135 execute_dwarf2_frame (void)
3137 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3138 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3140 /* The first time we're called, compute the incoming frame state. */
3141 if (cie_cfi_vec == NULL)
3142 create_cie_data ();
3144 dwarf2out_alloc_current_fde ();
3146 create_pseudo_cfg ();
3148 /* Do the work. */
 /* Pipeline: scan traces to create CFI notes, stitch state changes
    between traces, then copy the notes into the function's FDE.  */
3149 create_cfi_notes ();
3150 connect_traces ();
3151 add_cfis_to_fde ();
3153 /* Free all the data we allocated. */
3155 size_t i;
3156 dw_trace_info *ti;
3158 FOR_EACH_VEC_ELT (trace_info, i, ti)
3159 ti->regs_saved_in_regs.release ();
3161 trace_info.release ();
3163 delete trace_index;
3164 trace_index = NULL;
3166 return 0;
3169 /* Convert a DWARF call frame info. operation to its string name */
3171 static const char *
3172 dwarf_cfi_name (unsigned int cfi_opc)
3174 const char *name = get_DW_CFA_name (cfi_opc);
3176 if (name != NULL)
3177 return name;
 /* Fall back to a fixed placeholder for opcodes the table omits.  */
3179 return "DW_CFA_<unknown>";
3182 /* This routine will generate the correct assembly data for a location
3183 description based on a cfi entry with a complex address. */
3185 static void
3186 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3188 dw_loc_descr_ref loc;
3189 unsigned long size;
 /* DW_CFA_expression/val_expression carry a register operand first;
    def_cfa_expression has only the location expression.  */
3191 if (cfi->dw_cfi_opc == DW_CFA_expression
3192 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3194 unsigned r =
3195 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3196 dw2_asm_output_data (1, r, NULL);
3197 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3199 else
3200 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3202 /* Output the size of the block. */
3203 size = size_of_locs (loc);
3204 dw2_asm_output_data_uleb128 (size, NULL);
3206 /* Now output the operations themselves. */
3207 output_loc_sequence (loc, for_eh);
3210 /* Similar, but used for .cfi_escape. */
3212 static void
3213 output_cfa_loc_raw (dw_cfi_ref cfi)
3215 dw_loc_descr_ref loc;
3216 unsigned long size;
 /* Mirror of output_cfa_loc, but emitting comma-separated raw bytes
    suitable as .cfi_escape operands; always the EH register mapping.  */
3218 if (cfi->dw_cfi_opc == DW_CFA_expression
3219 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3221 unsigned r =
3222 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3223 fprintf (asm_out_file, "%#x,", r);
3224 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3226 else
3227 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3229 /* Output the size of the block. */
3230 size = size_of_locs (loc);
3231 dw2_asm_output_data_uleb128_raw (size);
3232 fputc (',', asm_out_file);
3234 /* Now output the operations themselves. */
3235 output_loc_sequence_raw (loc);
3238 /* Output a Call Frame Information opcode and its operand(s). */
3240 void
3241 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3243 unsigned long r;
3244 HOST_WIDE_INT off;
 /* The three "primary" opcodes (advance_loc, offset, restore) encode
    their operand in the low 6 bits of the opcode byte itself.  */
3246 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3247 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3248 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3249 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3250 ((unsigned HOST_WIDE_INT)
3251 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3252 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3254 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3255 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3256 "DW_CFA_offset, column %#lx", r);
3257 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3258 dw2_asm_output_data_uleb128 (off, NULL);
3260 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3262 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3263 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3264 "DW_CFA_restore, column %#lx", r);
3266 else
 /* All other opcodes: one opcode byte, then operands per opcode.  */
3268 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3269 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3271 switch (cfi->dw_cfi_opc)
3273 case DW_CFA_set_loc:
3274 if (for_eh)
3275 dw2_asm_output_encoded_addr_rtx (
3276 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3277 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3278 false, NULL);
3279 else
3280 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3281 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3282 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3283 break;
 /* The advance_locN forms emit a delta from the FDE's current label
    and then make the new label current.  */
3285 case DW_CFA_advance_loc1:
3286 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3287 fde->dw_fde_current_label, NULL);
3288 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3289 break;
3291 case DW_CFA_advance_loc2:
3292 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3293 fde->dw_fde_current_label, NULL);
3294 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3295 break;
3297 case DW_CFA_advance_loc4:
3298 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3299 fde->dw_fde_current_label, NULL);
3300 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3301 break;
3303 case DW_CFA_MIPS_advance_loc8:
3304 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3305 fde->dw_fde_current_label, NULL);
3306 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3307 break;
3309 case DW_CFA_offset_extended:
3310 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3311 dw2_asm_output_data_uleb128 (r, NULL);
3312 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3313 dw2_asm_output_data_uleb128 (off, NULL);
3314 break;
3316 case DW_CFA_def_cfa:
3317 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3318 dw2_asm_output_data_uleb128 (r, NULL);
3319 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3320 break;
3322 case DW_CFA_offset_extended_sf:
3323 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3324 dw2_asm_output_data_uleb128 (r, NULL);
3325 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3326 dw2_asm_output_data_sleb128 (off, NULL);
3327 break;
3329 case DW_CFA_def_cfa_sf:
3330 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3331 dw2_asm_output_data_uleb128 (r, NULL);
3332 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3333 dw2_asm_output_data_sleb128 (off, NULL);
3334 break;
3336 case DW_CFA_restore_extended:
3337 case DW_CFA_undefined:
3338 case DW_CFA_same_value:
3339 case DW_CFA_def_cfa_register:
3340 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3341 dw2_asm_output_data_uleb128 (r, NULL);
3342 break;
3344 case DW_CFA_register:
3345 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3346 dw2_asm_output_data_uleb128 (r, NULL);
3347 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3348 dw2_asm_output_data_uleb128 (r, NULL);
3349 break;
3351 case DW_CFA_def_cfa_offset:
3352 case DW_CFA_GNU_args_size:
3353 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3354 break;
3356 case DW_CFA_def_cfa_offset_sf:
3357 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3358 dw2_asm_output_data_sleb128 (off, NULL);
3359 break;
3361 case DW_CFA_GNU_window_save:
3362 break;
3364 case DW_CFA_def_cfa_expression:
3365 case DW_CFA_expression:
3366 case DW_CFA_val_expression:
3367 output_cfa_loc (cfi, for_eh);
3368 break;
3370 case DW_CFA_GNU_negative_offset_extended:
3371 /* Obsoleted by DW_CFA_offset_extended_sf. */
3372 gcc_unreachable ();
3374 default:
3375 break;
/* Similar, but do it via assembler directives instead.

   Emit CFI as a ".cfi_*" directive to stream F.  When F is
   asm_out_file the assembler interprets the directives; the same
   routine is reused for human-readable debugging dumps, in which
   case F is an arbitrary stream (e.g. stderr).  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* Column numbers are remapped for EH (second argument 1).  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no dedicated directive for this opcode; when talking
	 to the real assembler, spell it as a raw .cfi_escape byte
	 sequence.  For dumps, use a readable pseudo-directive.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      if (f != asm_out_file)
	{
	  /* Dump mode: the expression operand is not expanded.  */
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      /* No directive exists for expression CFAs; emit raw bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3500 void
3501 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3503 if (dwarf2out_do_cfi_asm ())
3504 output_cfi_directive (asm_out_file, cfi);
3507 static void
3508 dump_cfi_row (FILE *f, dw_cfi_row *row)
3510 dw_cfi_ref cfi;
3511 unsigned i;
3513 cfi = row->cfa_cfi;
3514 if (!cfi)
3516 dw_cfa_location dummy;
3517 memset (&dummy, 0, sizeof (dummy));
3518 dummy.reg = INVALID_REGNUM;
3519 cfi = def_cfa_0 (&dummy, &row->cfa);
3521 output_cfi_directive (f, cfi);
3523 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3524 if (cfi)
3525 output_cfi_directive (f, cfi);
/* Prototype for the debugger-callable entry point below.  */
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called by hand from a debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   GTY(()) marks it as a garbage-collector/PCH root.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3541 /* Decide whether to emit EH frame unwind information for the current
3542 translation unit. */
3544 bool
3545 dwarf2out_do_eh_frame (void)
3547 return
3548 (flag_unwind_tables || flag_exceptions)
3549 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3552 /* Decide whether we want to emit frame unwind information for the current
3553 translation unit. */
3555 bool
3556 dwarf2out_do_frame (void)
3558 /* We want to emit correct CFA location expressions or lists, so we
3559 have to return true if we're going to generate debug info, even if
3560 we're not going to output frame or unwind info. */
3561 if (dwarf_debuginfo_p () || dwarf_based_debuginfo_p ())
3562 return true;
3564 if (saved_do_cfi_asm > 0)
3565 return true;
3567 if (targetm.debug_unwind_info () == UI_DWARF2)
3568 return true;
3570 if (dwarf2out_do_eh_frame ())
3571 return true;
3573 return false;
/* Decide whether to emit frame unwind via assembler directives.

   The answer is memoized in SAVED_DO_CFI_ASM (0 unset, >0 yes, <0 no)
   so the checks run at most once per compilation; the cached value
   also survives PCH.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  Set before the early returns below
     so every "return false" path leaves the cache in the "no" state.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.
     (enc & 0x70 masks the DW_EH_PE application/relativity bits.)  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
namespace {

/* Pass metadata for the dwarf2 CFI-note pass; timed under TV_FINAL.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass that computes the DWARF2 call-frame information notes.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Gate: run the pass only when generic dwarf2 unwinding is usable.  */

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
/* Factory for the dwarf2 frame pass; the pass manager owns the result.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3663 #include "gt-dwarf2cfi.h"