svn merge -r 217500:218679 svn+ssh://gcc.gnu.org/svn/gcc/trunk
[official-gcc.git] / gcc / dwarf2cfi.c
blob04a17bd36ab5555bb1cc3c65fea5499b2d4debf2
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "hashtab.h"
30 #include "hash-set.h"
31 #include "vec.h"
32 #include "machmode.h"
33 #include "hard-reg-set.h"
34 #include "input.h"
35 #include "function.h"
36 #include "cfgbuild.h"
37 #include "dwarf2.h"
38 #include "dwarf2out.h"
39 #include "dwarf2asm.h"
40 #include "ggc.h"
41 #include "hash-table.h"
42 #include "tm_p.h"
43 #include "target.h"
44 #include "common/common-target.h"
45 #include "tree-pass.h"
47 #include "except.h" /* expand_builtin_dwarf_sp_column */
48 #include "expr.h" /* init_return_column_size */
49 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
50 #include "output.h" /* asm_out_file */
51 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

/* Targets without a return-address rtx should never reach code that
   evaluates this; trap at runtime if one does.  */
#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30
69 /* A collected description of an entire row of the abstract CFI table. */
70 typedef struct GTY(()) dw_cfi_row_struct
72 /* The expression that computes the CFA, expressed in two different ways.
73 The CFA member for the simple cases, and the full CFI expression for
74 the complex cases. The later will be a DW_CFA_cfa_expression. */
75 dw_cfa_location cfa;
76 dw_cfi_ref cfa_cfi;
78 /* The expressions for any register column that is saved. */
79 cfi_vec reg_save;
80 } dw_cfi_row;
82 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
83 typedef struct GTY(()) reg_saved_in_data_struct {
84 rtx orig_reg;
85 rtx saved_in_reg;
86 } reg_saved_in_data;
89 /* Since we no longer have a proper CFG, we're going to create a facsimile
90 of one on the fly while processing the frame-related insns.
92 We create dw_trace_info structures for each extended basic block beginning
93 and ending at a "save point". Save points are labels, barriers, certain
94 notes, and of course the beginning and end of the function.
96 As we encounter control transfer insns, we propagate the "current"
97 row state across the edges to the starts of traces. When checking is
98 enabled, we validate that we propagate the same data from all sources.
100 All traces are members of the TRACE_INFO array, in the order in which
101 they appear in the instruction stream.
103 All save points are present in the TRACE_INDEX hash, mapping the insn
104 starting a trace to the dw_trace_info describing the trace. */
106 typedef struct
108 /* The insn that begins the trace. */
109 rtx_insn *head;
111 /* The row state at the beginning and end of the trace. */
112 dw_cfi_row *beg_row, *end_row;
114 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
115 while scanning insns. However, the args_size value is irrelevant at
116 any point except can_throw_internal_p insns. Therefore the "delay"
117 sizes the values that must actually be emitted for this trace. */
118 HOST_WIDE_INT beg_true_args_size, end_true_args_size;
119 HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;
121 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
122 rtx_insn *eh_head;
124 /* The following variables contain data used in interpreting frame related
125 expressions. These are not part of the "real" row state as defined by
126 Dwarf, but it seems like they need to be propagated into a trace in case
127 frame related expressions have been sunk. */
128 /* ??? This seems fragile. These variables are fragments of a larger
129 expression. If we do not keep the entire expression together, we risk
130 not being able to put it together properly. Consider forcing targets
131 to generate self-contained expressions and dropping all of the magic
132 interpretation code in this file. Or at least refusing to shrink wrap
133 any frame related insn that doesn't contain a complete expression. */
135 /* The register used for saving registers to the stack, and its offset
136 from the CFA. */
137 dw_cfa_location cfa_store;
139 /* A temporary register holding an integral value used in adjusting SP
140 or setting up the store_reg. The "offset" field holds the integer
141 value, not an offset. */
142 dw_cfa_location cfa_temp;
144 /* A set of registers saved in other registers. This is the inverse of
145 the row->reg_save info, if the entry is a DW_CFA_register. This is
146 implemented as a flat array because it normally contains zero or 1
147 entry, depending on the target. IA-64 is the big spender here, using
148 a maximum of 5 entries. */
149 vec<reg_saved_in_data> regs_saved_in_regs;
151 /* An identifier for this trace. Used only for debugging dumps. */
152 unsigned id;
154 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
155 bool switch_sections;
157 /* True if we've seen different values incoming to beg_true_args_size. */
158 bool args_size_undefined;
159 } dw_trace_info;
162 typedef dw_trace_info *dw_trace_info_ref;
165 /* Hashtable helpers. */
167 struct trace_info_hasher : typed_noop_remove <dw_trace_info>
169 typedef dw_trace_info value_type;
170 typedef dw_trace_info compare_type;
171 static inline hashval_t hash (const value_type *);
172 static inline bool equal (const value_type *, const compare_type *);
175 inline hashval_t
176 trace_info_hasher::hash (const value_type *ti)
178 return INSN_UID (ti->head);
181 inline bool
182 trace_info_hasher::equal (const value_type *a, const compare_type *b)
184 return a->head == b->head;
188 /* The variables making up the pseudo-cfg, as described above. */
189 static vec<dw_trace_info> trace_info;
190 static vec<dw_trace_info_ref> trace_work_list;
191 static hash_table<trace_info_hasher> *trace_index;
193 /* A vector of call frame insns for the CIE. */
194 cfi_vec cie_cfi_vec;
196 /* The state of the first row of the FDE table, which includes the
197 state provided by the CIE. */
198 static GTY(()) dw_cfi_row *cie_cfi_row;
200 static GTY(()) reg_saved_in_data *cie_return_save;
202 static GTY(()) unsigned long dwarf2out_cfi_label_num;
204 /* The insn after which a new CFI note should be emitted. */
205 static rtx add_cfi_insn;
207 /* When non-null, add_cfi will add the CFI to this vector. */
208 static cfi_vec *add_cfi_vec;
210 /* The current instruction trace. */
211 static dw_trace_info *cur_trace;
213 /* The current, i.e. most recently generated, row of the CFI table. */
214 static dw_cfi_row *cur_row;
216 /* A copy of the current CFA, for use during the processing of a
217 single insn. */
218 static dw_cfa_location *cur_cfa;
220 /* We delay emitting a register save until either (a) we reach the end
221 of the prologue or (b) the register is clobbered. This clusters
222 register saves so that there are fewer pc advances. */
224 typedef struct {
225 rtx reg;
226 rtx saved_reg;
227 HOST_WIDE_INT cfa_offset;
228 } queued_reg_save;
231 static vec<queued_reg_save> queued_reg_saves;
233 /* True if any CFI directives were emitted at the current insn. */
234 static bool any_cfis_emitted;
236 /* Short-hand for commonly used register numbers. */
237 static unsigned dw_stack_pointer_regnum;
238 static unsigned dw_frame_pointer_regnum;
240 /* Hook used by __throw. */
243 expand_builtin_dwarf_sp_column (void)
245 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
246 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
249 /* MEM is a memory reference for the register size table, each element of
250 which has mode MODE. Initialize column C as a return address column. */
252 static void
253 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
255 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
256 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
257 emit_move_insn (adjust_address (mem, mode, offset),
258 gen_int_mode (size, mode));
261 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
262 init_one_dwarf_reg_size to communicate on what has been done by the
263 latter. */
265 typedef struct
267 /* Whether the dwarf return column was initialized. */
268 bool wrote_return_column;
270 /* For each hard register REGNO, whether init_one_dwarf_reg_size
271 was given REGNO to process already. */
272 bool processed_regno [FIRST_PSEUDO_REGISTER];
274 } init_one_dwarf_reg_state;
276 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
277 initialize the dwarf register size table entry corresponding to register
278 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
279 use for the size entry to initialize, and INIT_STATE is the communication
280 datastructure conveying what we're doing to our caller. */
282 static
283 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
284 rtx table, machine_mode slotmode,
285 init_one_dwarf_reg_state *init_state)
287 const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
288 const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
289 const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
291 const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
292 const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);
294 init_state->processed_regno[regno] = true;
296 if (rnum >= DWARF_FRAME_REGISTERS)
297 return;
299 if (dnum == DWARF_FRAME_RETURN_COLUMN)
301 if (regmode == VOIDmode)
302 return;
303 init_state->wrote_return_column = true;
306 if (slotoffset < 0)
307 return;
309 emit_move_insn (adjust_address (table, slotmode, slotoffset),
310 gen_int_mode (regsize, slotmode));
313 /* Generate code to initialize the dwarf register size table located
314 at the provided ADDRESS. */
316 void
317 expand_builtin_init_dwarf_reg_sizes (tree address)
319 unsigned int i;
320 machine_mode mode = TYPE_MODE (char_type_node);
321 rtx addr = expand_normal (address);
322 rtx mem = gen_rtx_MEM (BLKmode, addr);
324 init_one_dwarf_reg_state init_state;
326 memset ((char *)&init_state, 0, sizeof (init_state));
328 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
330 machine_mode save_mode;
331 rtx span;
333 /* No point in processing a register multiple times. This could happen
334 with register spans, e.g. when a reg is first processed as a piece of
335 a span, then as a register on its own later on. */
337 if (init_state.processed_regno[i])
338 continue;
340 save_mode = targetm.dwarf_frame_reg_mode (i);
341 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
343 if (!span)
344 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
345 else
347 for (int si = 0; si < XVECLEN (span, 0); si++)
349 rtx reg = XVECEXP (span, 0, si);
351 init_one_dwarf_reg_size
352 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
357 if (!init_state.wrote_return_column)
358 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
360 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
361 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
362 #endif
364 targetm.init_dwarf_reg_sizes_extra (address);
368 static dw_trace_info *
369 get_trace_info (rtx_insn *insn)
371 dw_trace_info dummy;
372 dummy.head = insn;
373 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
376 static bool
377 save_point_p (rtx_insn *insn)
379 /* Labels, except those that are really jump tables. */
380 if (LABEL_P (insn))
381 return inside_basic_block_p (insn);
383 /* We split traces at the prologue/epilogue notes because those
384 are points at which the unwind info is usually stable. This
385 makes it easier to find spots with identical unwind info so
386 that we can use remember/restore_state opcodes. */
387 if (NOTE_P (insn))
388 switch (NOTE_KIND (insn))
390 case NOTE_INSN_PROLOGUE_END:
391 case NOTE_INSN_EPILOGUE_BEG:
392 return true;
395 return false;
398 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
400 static inline HOST_WIDE_INT
401 div_data_align (HOST_WIDE_INT off)
403 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
404 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
405 return r;
408 /* Return true if we need a signed version of a given opcode
409 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
411 static inline bool
412 need_data_align_sf_opcode (HOST_WIDE_INT off)
414 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
417 /* Return a pointer to a newly allocated Call Frame Instruction. */
419 static inline dw_cfi_ref
420 new_cfi (void)
422 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
424 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
425 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
427 return cfi;
430 /* Return a newly allocated CFI row, with no defined data. */
432 static dw_cfi_row *
433 new_cfi_row (void)
435 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
437 row->cfa.reg = INVALID_REGNUM;
439 return row;
442 /* Return a copy of an existing CFI row. */
444 static dw_cfi_row *
445 copy_cfi_row (dw_cfi_row *src)
447 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
449 *dst = *src;
450 dst->reg_save = vec_safe_copy (src->reg_save);
452 return dst;
455 /* Generate a new label for the CFI info to refer to. */
457 static char *
458 dwarf2out_cfi_label (void)
460 int num = dwarf2out_cfi_label_num++;
461 char label[20];
463 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
465 return xstrdup (label);
468 /* Add CFI either to the current insn stream or to a vector, or both. */
470 static void
471 add_cfi (dw_cfi_ref cfi)
473 any_cfis_emitted = true;
475 if (add_cfi_insn != NULL)
477 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
478 NOTE_CFI (add_cfi_insn) = cfi;
481 if (add_cfi_vec != NULL)
482 vec_safe_push (*add_cfi_vec, cfi);
485 static void
486 add_cfi_args_size (HOST_WIDE_INT size)
488 dw_cfi_ref cfi = new_cfi ();
490 /* While we can occasionally have args_size < 0 internally, this state
491 should not persist at a point we actually need an opcode. */
492 gcc_assert (size >= 0);
494 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
495 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
497 add_cfi (cfi);
500 static void
501 add_cfi_restore (unsigned reg)
503 dw_cfi_ref cfi = new_cfi ();
505 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
506 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
508 add_cfi (cfi);
511 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
512 that the register column is no longer saved. */
514 static void
515 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
517 if (vec_safe_length (row->reg_save) <= column)
518 vec_safe_grow_cleared (row->reg_save, column + 1);
519 (*row->reg_save)[column] = cfi;
522 /* This function fills in aa dw_cfa_location structure from a dwarf location
523 descriptor sequence. */
525 static void
526 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
528 struct dw_loc_descr_node *ptr;
529 cfa->offset = 0;
530 cfa->base_offset = 0;
531 cfa->indirect = 0;
532 cfa->reg = -1;
534 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
536 enum dwarf_location_atom op = ptr->dw_loc_opc;
538 switch (op)
540 case DW_OP_reg0:
541 case DW_OP_reg1:
542 case DW_OP_reg2:
543 case DW_OP_reg3:
544 case DW_OP_reg4:
545 case DW_OP_reg5:
546 case DW_OP_reg6:
547 case DW_OP_reg7:
548 case DW_OP_reg8:
549 case DW_OP_reg9:
550 case DW_OP_reg10:
551 case DW_OP_reg11:
552 case DW_OP_reg12:
553 case DW_OP_reg13:
554 case DW_OP_reg14:
555 case DW_OP_reg15:
556 case DW_OP_reg16:
557 case DW_OP_reg17:
558 case DW_OP_reg18:
559 case DW_OP_reg19:
560 case DW_OP_reg20:
561 case DW_OP_reg21:
562 case DW_OP_reg22:
563 case DW_OP_reg23:
564 case DW_OP_reg24:
565 case DW_OP_reg25:
566 case DW_OP_reg26:
567 case DW_OP_reg27:
568 case DW_OP_reg28:
569 case DW_OP_reg29:
570 case DW_OP_reg30:
571 case DW_OP_reg31:
572 cfa->reg = op - DW_OP_reg0;
573 break;
574 case DW_OP_regx:
575 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
576 break;
577 case DW_OP_breg0:
578 case DW_OP_breg1:
579 case DW_OP_breg2:
580 case DW_OP_breg3:
581 case DW_OP_breg4:
582 case DW_OP_breg5:
583 case DW_OP_breg6:
584 case DW_OP_breg7:
585 case DW_OP_breg8:
586 case DW_OP_breg9:
587 case DW_OP_breg10:
588 case DW_OP_breg11:
589 case DW_OP_breg12:
590 case DW_OP_breg13:
591 case DW_OP_breg14:
592 case DW_OP_breg15:
593 case DW_OP_breg16:
594 case DW_OP_breg17:
595 case DW_OP_breg18:
596 case DW_OP_breg19:
597 case DW_OP_breg20:
598 case DW_OP_breg21:
599 case DW_OP_breg22:
600 case DW_OP_breg23:
601 case DW_OP_breg24:
602 case DW_OP_breg25:
603 case DW_OP_breg26:
604 case DW_OP_breg27:
605 case DW_OP_breg28:
606 case DW_OP_breg29:
607 case DW_OP_breg30:
608 case DW_OP_breg31:
609 cfa->reg = op - DW_OP_breg0;
610 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
611 break;
612 case DW_OP_bregx:
613 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
614 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
615 break;
616 case DW_OP_deref:
617 cfa->indirect = 1;
618 break;
619 case DW_OP_plus_uconst:
620 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
621 break;
622 default:
623 gcc_unreachable ();
628 /* Find the previous value for the CFA, iteratively. CFI is the opcode
629 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
630 one level of remember/restore state processing. */
632 void
633 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
635 switch (cfi->dw_cfi_opc)
637 case DW_CFA_def_cfa_offset:
638 case DW_CFA_def_cfa_offset_sf:
639 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
640 break;
641 case DW_CFA_def_cfa_register:
642 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
643 break;
644 case DW_CFA_def_cfa:
645 case DW_CFA_def_cfa_sf:
646 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
647 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
648 break;
649 case DW_CFA_def_cfa_expression:
650 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
651 break;
653 case DW_CFA_remember_state:
654 gcc_assert (!remember->in_use);
655 *remember = *loc;
656 remember->in_use = 1;
657 break;
658 case DW_CFA_restore_state:
659 gcc_assert (remember->in_use);
660 *loc = *remember;
661 remember->in_use = 0;
662 break;
664 default:
665 break;
669 /* Determine if two dw_cfa_location structures define the same data. */
671 bool
672 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
674 return (loc1->reg == loc2->reg
675 && loc1->offset == loc2->offset
676 && loc1->indirect == loc2->indirect
677 && (loc1->indirect == 0
678 || loc1->base_offset == loc2->base_offset));
681 /* Determine if two CFI operands are identical. */
683 static bool
684 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
686 switch (t)
688 case dw_cfi_oprnd_unused:
689 return true;
690 case dw_cfi_oprnd_reg_num:
691 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
692 case dw_cfi_oprnd_offset:
693 return a->dw_cfi_offset == b->dw_cfi_offset;
694 case dw_cfi_oprnd_addr:
695 return (a->dw_cfi_addr == b->dw_cfi_addr
696 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
697 case dw_cfi_oprnd_loc:
698 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
700 gcc_unreachable ();
703 /* Determine if two CFI entries are identical. */
705 static bool
706 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
708 enum dwarf_call_frame_info opc;
710 /* Make things easier for our callers, including missing operands. */
711 if (a == b)
712 return true;
713 if (a == NULL || b == NULL)
714 return false;
716 /* Obviously, the opcodes must match. */
717 opc = a->dw_cfi_opc;
718 if (opc != b->dw_cfi_opc)
719 return false;
721 /* Compare the two operands, re-using the type of the operands as
722 already exposed elsewhere. */
723 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
724 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
725 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
726 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
729 /* Determine if two CFI_ROW structures are identical. */
731 static bool
732 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
734 size_t i, n_a, n_b, n_max;
736 if (a->cfa_cfi)
738 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
739 return false;
741 else if (!cfa_equal_p (&a->cfa, &b->cfa))
742 return false;
744 n_a = vec_safe_length (a->reg_save);
745 n_b = vec_safe_length (b->reg_save);
746 n_max = MAX (n_a, n_b);
748 for (i = 0; i < n_max; ++i)
750 dw_cfi_ref r_a = NULL, r_b = NULL;
752 if (i < n_a)
753 r_a = (*a->reg_save)[i];
754 if (i < n_b)
755 r_b = (*b->reg_save)[i];
757 if (!cfi_equal_p (r_a, r_b))
758 return false;
761 return true;
764 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
765 what opcode to emit. Returns the CFI opcode to effect the change, or
766 NULL if NEW_CFA == OLD_CFA. */
768 static dw_cfi_ref
769 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
771 dw_cfi_ref cfi;
773 /* If nothing changed, no need to issue any call frame instructions. */
774 if (cfa_equal_p (old_cfa, new_cfa))
775 return NULL;
777 cfi = new_cfi ();
779 if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
781 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
782 the CFA register did not change but the offset did. The data
783 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
784 in the assembler via the .cfi_def_cfa_offset directive. */
785 if (new_cfa->offset < 0)
786 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
787 else
788 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
789 cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
791 else if (new_cfa->offset == old_cfa->offset
792 && old_cfa->reg != INVALID_REGNUM
793 && !new_cfa->indirect
794 && !old_cfa->indirect)
796 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
797 indicating the CFA register has changed to <register> but the
798 offset has not changed. */
799 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
800 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
802 else if (new_cfa->indirect == 0)
804 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
805 indicating the CFA register has changed to <register> with
806 the specified offset. The data factoring for DW_CFA_def_cfa_sf
807 happens in output_cfi, or in the assembler via the .cfi_def_cfa
808 directive. */
809 if (new_cfa->offset < 0)
810 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
811 else
812 cfi->dw_cfi_opc = DW_CFA_def_cfa;
813 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
814 cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
816 else
818 /* Construct a DW_CFA_def_cfa_expression instruction to
819 calculate the CFA using a full location expression since no
820 register-offset pair is available. */
821 struct dw_loc_descr_node *loc_list;
823 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
824 loc_list = build_cfa_loc (new_cfa, 0);
825 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
828 return cfi;
831 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
833 static void
834 def_cfa_1 (dw_cfa_location *new_cfa)
836 dw_cfi_ref cfi;
838 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
839 cur_trace->cfa_store.offset = new_cfa->offset;
841 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
842 if (cfi)
844 cur_row->cfa = *new_cfa;
845 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
846 ? cfi : NULL);
848 add_cfi (cfi);
852 /* Add the CFI for saving a register. REG is the CFA column number.
853 If SREG is -1, the register is saved at OFFSET from the CFA;
854 otherwise it is saved in SREG. */
856 static void
857 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
859 dw_fde_ref fde = cfun ? cfun->fde : NULL;
860 dw_cfi_ref cfi = new_cfi ();
862 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
864 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
865 if (fde
866 && fde->stack_realign
867 && sreg == INVALID_REGNUM)
869 cfi->dw_cfi_opc = DW_CFA_expression;
870 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
871 cfi->dw_cfi_oprnd2.dw_cfi_loc
872 = build_cfa_aligned_loc (&cur_row->cfa, offset,
873 fde->stack_realignment);
875 else if (sreg == INVALID_REGNUM)
877 if (need_data_align_sf_opcode (offset))
878 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
879 else if (reg & ~0x3f)
880 cfi->dw_cfi_opc = DW_CFA_offset_extended;
881 else
882 cfi->dw_cfi_opc = DW_CFA_offset;
883 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
885 else if (sreg == reg)
887 /* While we could emit something like DW_CFA_same_value or
888 DW_CFA_restore, we never expect to see something like that
889 in a prologue. This is more likely to be a bug. A backend
890 can always bypass this by using REG_CFA_RESTORE directly. */
891 gcc_unreachable ();
893 else
895 cfi->dw_cfi_opc = DW_CFA_register;
896 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
899 add_cfi (cfi);
900 update_row_reg_save (cur_row, reg, cfi);
903 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
904 and adjust data structures to match. */
906 static void
907 notice_args_size (rtx insn)
909 HOST_WIDE_INT args_size, delta;
910 rtx note;
912 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
913 if (note == NULL)
914 return;
916 args_size = INTVAL (XEXP (note, 0));
917 delta = args_size - cur_trace->end_true_args_size;
918 if (delta == 0)
919 return;
921 cur_trace->end_true_args_size = args_size;
923 /* If the CFA is computed off the stack pointer, then we must adjust
924 the computation of the CFA as well. */
925 if (cur_cfa->reg == dw_stack_pointer_regnum)
927 gcc_assert (!cur_cfa->indirect);
929 /* Convert a change in args_size (always a positive in the
930 direction of stack growth) to a change in stack pointer. */
931 #ifndef STACK_GROWS_DOWNWARD
932 delta = -delta;
933 #endif
934 cur_cfa->offset += delta;
938 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
939 data within the trace related to EH insns and args_size. */
941 static void
942 notice_eh_throw (rtx_insn *insn)
944 HOST_WIDE_INT args_size;
946 args_size = cur_trace->end_true_args_size;
947 if (cur_trace->eh_head == NULL)
949 cur_trace->eh_head = insn;
950 cur_trace->beg_delay_args_size = args_size;
951 cur_trace->end_delay_args_size = args_size;
953 else if (cur_trace->end_delay_args_size != args_size)
955 cur_trace->end_delay_args_size = args_size;
957 /* ??? If the CFA is the stack pointer, search backward for the last
958 CFI note and insert there. Given that the stack changed for the
959 args_size change, there *must* be such a note in between here and
960 the last eh insn. */
961 add_cfi_args_size (args_size);
965 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
966 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
967 used in places where rtl is prohibited. */
969 static inline unsigned
970 dwf_regno (const_rtx reg)
972 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
973 return DWARF_FRAME_REGNUM (REGNO (reg));
976 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
978 static bool
979 compare_reg_or_pc (rtx x, rtx y)
981 if (REG_P (x) && REG_P (y))
982 return REGNO (x) == REGNO (y);
983 return x == y;
986 /* Record SRC as being saved in DEST. DEST may be null to delete an
987 existing entry. SRC may be a register or PC_RTX. */
989 static void
990 record_reg_saved_in_reg (rtx dest, rtx src)
992 reg_saved_in_data *elt;
993 size_t i;
995 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
996 if (compare_reg_or_pc (elt->orig_reg, src))
998 if (dest == NULL)
999 cur_trace->regs_saved_in_regs.unordered_remove (i);
1000 else
1001 elt->saved_in_reg = dest;
1002 return;
1005 if (dest == NULL)
1006 return;
1008 reg_saved_in_data e = {src, dest};
1009 cur_trace->regs_saved_in_regs.safe_push (e);
1012 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1013 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1015 static void
1016 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1018 queued_reg_save *q;
1019 queued_reg_save e = {reg, sreg, offset};
1020 size_t i;
1022 /* Duplicates waste space, but it's also necessary to remove them
1023 for correctness, since the queue gets output in reverse order. */
1024 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1025 if (compare_reg_or_pc (q->reg, reg))
1027 *q = e;
1028 return;
1031 queued_reg_saves.safe_push (e);
1034 /* Output all the entries in QUEUED_REG_SAVES. */
1036 static void
1037 dwarf2out_flush_queued_reg_saves (void)
1039 queued_reg_save *q;
1040 size_t i;
1042 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1044 unsigned int reg, sreg;
1046 record_reg_saved_in_reg (q->saved_reg, q->reg);
1048 if (q->reg == pc_rtx)
1049 reg = DWARF_FRAME_RETURN_COLUMN;
1050 else
1051 reg = dwf_regno (q->reg);
1052 if (q->saved_reg)
1053 sreg = dwf_regno (q->saved_reg);
1054 else
1055 sreg = INVALID_REGNUM;
1056 reg_save (reg, sreg, q->cfa_offset);
1059 queued_reg_saves.truncate (0);
1062 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1063 location for? Or, does it clobber a register which we've previously
1064 said that some other register is saved in, and for which we now
1065 have a new location for? */
1067 static bool
1068 clobbers_queued_reg_save (const_rtx insn)
1070 queued_reg_save *q;
1071 size_t iq;
1073 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1075 size_t ir;
1076 reg_saved_in_data *rir;
1078 if (modified_in_p (q->reg, insn))
1079 return true;
1081 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1082 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1083 && modified_in_p (rir->saved_in_reg, insn))
1084 return true;
1087 return false;
1090 /* What register, if any, is currently saved in REG? */
1092 static rtx
1093 reg_saved_in (rtx reg)
1095 unsigned int regn = REGNO (reg);
1096 queued_reg_save *q;
1097 reg_saved_in_data *rir;
1098 size_t i;
1100 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1101 if (q->saved_reg && regn == REGNO (q->saved_reg))
1102 return q->reg;
1104 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1105 if (regn == REGNO (rir->saved_in_reg))
1106 return rir->orig_reg;
1108 return NULL_RTX;
1111 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1113 static void
1114 dwarf2out_frame_debug_def_cfa (rtx pat)
1116 memset (cur_cfa, 0, sizeof (*cur_cfa));
1118 if (GET_CODE (pat) == PLUS)
1120 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1121 pat = XEXP (pat, 0);
1123 if (MEM_P (pat))
1125 cur_cfa->indirect = 1;
1126 pat = XEXP (pat, 0);
1127 if (GET_CODE (pat) == PLUS)
1129 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1130 pat = XEXP (pat, 0);
1133 /* ??? If this fails, we could be calling into the _loc functions to
1134 define a full expression. So far no port does that. */
1135 gcc_assert (REG_P (pat));
1136 cur_cfa->reg = dwf_regno (pat);
1139 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1141 static void
1142 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1144 rtx src, dest;
1146 gcc_assert (GET_CODE (pat) == SET);
1147 dest = XEXP (pat, 0);
1148 src = XEXP (pat, 1);
1150 switch (GET_CODE (src))
1152 case PLUS:
1153 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1154 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1155 break;
1157 case REG:
1158 break;
1160 default:
1161 gcc_unreachable ();
1164 cur_cfa->reg = dwf_regno (dest);
1165 gcc_assert (cur_cfa->indirect == 0);
1168 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1170 static void
1171 dwarf2out_frame_debug_cfa_offset (rtx set)
1173 HOST_WIDE_INT offset;
1174 rtx src, addr, span;
1175 unsigned int sregno;
1177 src = XEXP (set, 1);
1178 addr = XEXP (set, 0);
1179 gcc_assert (MEM_P (addr));
1180 addr = XEXP (addr, 0);
1182 /* As documented, only consider extremely simple addresses. */
1183 switch (GET_CODE (addr))
1185 case REG:
1186 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1187 offset = -cur_cfa->offset;
1188 break;
1189 case PLUS:
1190 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1191 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1192 break;
1193 default:
1194 gcc_unreachable ();
1197 if (src == pc_rtx)
1199 span = NULL;
1200 sregno = DWARF_FRAME_RETURN_COLUMN;
1202 else
1204 span = targetm.dwarf_register_span (src);
1205 sregno = dwf_regno (src);
1208 /* ??? We'd like to use queue_reg_save, but we need to come up with
1209 a different flushing heuristic for epilogues. */
1210 if (!span)
1211 reg_save (sregno, INVALID_REGNUM, offset);
1212 else
1214 /* We have a PARALLEL describing where the contents of SRC live.
1215 Adjust the offset for each piece of the PARALLEL. */
1216 HOST_WIDE_INT span_offset = offset;
1218 gcc_assert (GET_CODE (span) == PARALLEL);
1220 const int par_len = XVECLEN (span, 0);
1221 for (int par_index = 0; par_index < par_len; par_index++)
1223 rtx elem = XVECEXP (span, 0, par_index);
1224 sregno = dwf_regno (src);
1225 reg_save (sregno, INVALID_REGNUM, span_offset);
1226 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1231 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1233 static void
1234 dwarf2out_frame_debug_cfa_register (rtx set)
1236 rtx src, dest;
1237 unsigned sregno, dregno;
1239 src = XEXP (set, 1);
1240 dest = XEXP (set, 0);
1242 record_reg_saved_in_reg (dest, src);
1243 if (src == pc_rtx)
1244 sregno = DWARF_FRAME_RETURN_COLUMN;
1245 else
1246 sregno = dwf_regno (src);
1248 dregno = dwf_regno (dest);
1250 /* ??? We'd like to use queue_reg_save, but we need to come up with
1251 a different flushing heuristic for epilogues. */
1252 reg_save (sregno, dregno, 0);
1255 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1257 static void
1258 dwarf2out_frame_debug_cfa_expression (rtx set)
1260 rtx src, dest, span;
1261 dw_cfi_ref cfi = new_cfi ();
1262 unsigned regno;
1264 dest = SET_DEST (set);
1265 src = SET_SRC (set);
1267 gcc_assert (REG_P (src));
1268 gcc_assert (MEM_P (dest));
1270 span = targetm.dwarf_register_span (src);
1271 gcc_assert (!span);
1273 regno = dwf_regno (src);
1275 cfi->dw_cfi_opc = DW_CFA_expression;
1276 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1277 cfi->dw_cfi_oprnd2.dw_cfi_loc
1278 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1279 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1281 /* ??? We'd like to use queue_reg_save, were the interface different,
1282 and, as above, we could manage flushing for epilogues. */
1283 add_cfi (cfi);
1284 update_row_reg_save (cur_row, regno, cfi);
1287 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1289 static void
1290 dwarf2out_frame_debug_cfa_restore (rtx reg)
1292 gcc_assert (REG_P (reg));
1294 rtx span = targetm.dwarf_register_span (reg);
1295 if (!span)
1297 unsigned int regno = dwf_regno (reg);
1298 add_cfi_restore (regno);
1299 update_row_reg_save (cur_row, regno, NULL);
1301 else
1303 /* We have a PARALLEL describing where the contents of REG live.
1304 Restore the register for each piece of the PARALLEL. */
1305 gcc_assert (GET_CODE (span) == PARALLEL);
1307 const int par_len = XVECLEN (span, 0);
1308 for (int par_index = 0; par_index < par_len; par_index++)
1310 reg = XVECEXP (span, 0, par_index);
1311 gcc_assert (REG_P (reg));
1312 unsigned int regno = dwf_regno (reg);
1313 add_cfi_restore (regno);
1314 update_row_reg_save (cur_row, regno, NULL);
1319 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1320 ??? Perhaps we should note in the CIE where windows are saved (instead of
1321 assuming 0(cfa)) and what registers are in the window. */
1323 static void
1324 dwarf2out_frame_debug_cfa_window_save (void)
1326 dw_cfi_ref cfi = new_cfi ();
1328 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1329 add_cfi (cfi);
1332 /* Record call frame debugging information for an expression EXPR,
1333 which either sets SP or FP (adjusting how we calculate the frame
1334 address) or saves a register to the stack or another register.
1335 LABEL indicates the address of EXPR.
1337 This function encodes a state machine mapping rtxes to actions on
1338 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1339 users need not read the source code.
1341 The High-Level Picture
1343 Changes in the register we use to calculate the CFA: Currently we
1344 assume that if you copy the CFA register into another register, we
1345 should take the other one as the new CFA register; this seems to
1346 work pretty well. If it's wrong for some target, it's simple
1347 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1349 Changes in the register we use for saving registers to the stack:
1350 This is usually SP, but not always. Again, we deduce that if you
1351 copy SP into another register (and SP is not the CFA register),
1352 then the new register is the one we will be using for register
1353 saves. This also seems to work.
1355 Register saves: There's not much guesswork about this one; if
1356 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1357 register save, and the register used to calculate the destination
1358 had better be the one we think we're using for this purpose.
1359 It's also assumed that a copy from a call-saved register to another
1360 register is saving that register if RTX_FRAME_RELATED_P is set on
1361 that instruction. If the copy is from a call-saved register to
1362 the *same* register, that means that the register is now the same
1363 value as in the caller.
1365 Except: If the register being saved is the CFA register, and the
1366 offset is nonzero, we are saving the CFA, so we assume we have to
1367 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1368 the intent is to save the value of SP from the previous frame.
1370 In addition, if a register has previously been saved to a different
1371 register,
1373 Invariants / Summaries of Rules
1375 cfa current rule for calculating the CFA. It usually
1376 consists of a register and an offset. This is
1377 actually stored in *cur_cfa, but abbreviated
1378 for the purposes of this documentation.
1379 cfa_store register used by prologue code to save things to the stack
1380 cfa_store.offset is the offset from the value of
1381 cfa_store.reg to the actual CFA
1382 cfa_temp register holding an integral value. cfa_temp.offset
1383 stores the value, which will be used to adjust the
1384 stack pointer. cfa_temp is also used like cfa_store,
1385 to track stores to the stack via fp or a temp reg.
1387 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1388 with cfa.reg as the first operand changes the cfa.reg and its
1389 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1390 cfa_temp.offset.
1392 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1393 expression yielding a constant. This sets cfa_temp.reg
1394 and cfa_temp.offset.
1396 Rule 5: Create a new register cfa_store used to save items to the
1397 stack.
1399 Rules 10-14: Save a register to the stack. Define offset as the
1400 difference of the original location and cfa_store's
1401 location (or cfa_temp's location if cfa_temp is used).
1403 Rules 16-20: If AND operation happens on sp in prologue, we assume
1404 stack is realigned. We will use a group of DW_OP_XXX
1405 expressions to represent the location of the stored
1406 register instead of CFA+offset.
1408 The Rules
1410 "{a,b}" indicates a choice of a xor b.
1411 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1413 Rule 1:
1414 (set <reg1> <reg2>:cfa.reg)
1415 effects: cfa.reg = <reg1>
1416 cfa.offset unchanged
1417 cfa_temp.reg = <reg1>
1418 cfa_temp.offset = cfa.offset
1420 Rule 2:
1421 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1422 {<const_int>,<reg>:cfa_temp.reg}))
1423 effects: cfa.reg = sp if fp used
1424 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1425 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1426 if cfa_store.reg==sp
1428 Rule 3:
1429 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1430 effects: cfa.reg = fp
1431 cfa_offset += +/- <const_int>
1433 Rule 4:
1434 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1435 constraints: <reg1> != fp
1436 <reg1> != sp
1437 effects: cfa.reg = <reg1>
1438 cfa_temp.reg = <reg1>
1439 cfa_temp.offset = cfa.offset
1441 Rule 5:
1442 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1443 constraints: <reg1> != fp
1444 <reg1> != sp
1445 effects: cfa_store.reg = <reg1>
1446 cfa_store.offset = cfa.offset - cfa_temp.offset
1448 Rule 6:
1449 (set <reg> <const_int>)
1450 effects: cfa_temp.reg = <reg>
1451 cfa_temp.offset = <const_int>
1453 Rule 7:
1454 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1455 effects: cfa_temp.reg = <reg1>
1456 cfa_temp.offset |= <const_int>
1458 Rule 8:
1459 (set <reg> (high <exp>))
1460 effects: none
1462 Rule 9:
1463 (set <reg> (lo_sum <exp> <const_int>))
1464 effects: cfa_temp.reg = <reg>
1465 cfa_temp.offset = <const_int>
1467 Rule 10:
1468 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1469 effects: cfa_store.offset -= <const_int>
1470 cfa.offset = cfa_store.offset if cfa.reg == sp
1471 cfa.reg = sp
1472 cfa.base_offset = -cfa_store.offset
1474 Rule 11:
1475 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1476 effects: cfa_store.offset += -/+ mode_size(mem)
1477 cfa.offset = cfa_store.offset if cfa.reg == sp
1478 cfa.reg = sp
1479 cfa.base_offset = -cfa_store.offset
1481 Rule 12:
1482 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1484 <reg2>)
1485 effects: cfa.reg = <reg1>
1486 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1488 Rule 13:
1489 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1490 effects: cfa.reg = <reg1>
1491 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1493 Rule 14:
1494 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1495 effects: cfa.reg = <reg1>
1496 cfa.base_offset = -cfa_temp.offset
1497 cfa_temp.offset -= mode_size(mem)
1499 Rule 15:
1500 (set <reg> {unspec, unspec_volatile})
1501 effects: target-dependent
1503 Rule 16:
1504 (set sp (and: sp <const_int>))
1505 constraints: cfa_store.reg == sp
1506 effects: cfun->fde.stack_realign = 1
1507 cfa_store.offset = 0
1508 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1510 Rule 17:
1511 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1512 effects: cfa_store.offset += -/+ mode_size(mem)
1514 Rule 18:
1515 (set (mem ({pre_inc, pre_dec} sp)) fp)
1516 constraints: fde->stack_realign == 1
1517 effects: cfa_store.offset = 0
1518 cfa.reg != HARD_FRAME_POINTER_REGNUM
1520 Rule 19:
1521 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1522 constraints: fde->stack_realign == 1
1523 && cfa.offset == 0
1524 && cfa.indirect == 0
1525 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1526 effects: Use DW_CFA_def_cfa_expression to define cfa
1527 cfa.reg == fde->drap_reg */
1529 static void
1530 dwarf2out_frame_debug_expr (rtx expr)
1532 rtx src, dest, span;
1533 HOST_WIDE_INT offset;
1534 dw_fde_ref fde;
1536 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1537 the PARALLEL independently. The first element is always processed if
1538 it is a SET. This is for backward compatibility. Other elements
1539 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1540 flag is set in them. */
1541 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1543 int par_index;
1544 int limit = XVECLEN (expr, 0);
1545 rtx elem;
1547 /* PARALLELs have strict read-modify-write semantics, so we
1548 ought to evaluate every rvalue before changing any lvalue.
1549 It's cumbersome to do that in general, but there's an
1550 easy approximation that is enough for all current users:
1551 handle register saves before register assignments. */
1552 if (GET_CODE (expr) == PARALLEL)
1553 for (par_index = 0; par_index < limit; par_index++)
1555 elem = XVECEXP (expr, 0, par_index);
1556 if (GET_CODE (elem) == SET
1557 && MEM_P (SET_DEST (elem))
1558 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1559 dwarf2out_frame_debug_expr (elem);
/* Second pass: everything that is not a store to memory.  */
1562 for (par_index = 0; par_index < limit; par_index++)
1564 elem = XVECEXP (expr, 0, par_index);
1565 if (GET_CODE (elem) == SET
1566 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1567 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1568 dwarf2out_frame_debug_expr (elem);
1570 return;
/* From here on, EXPR is a single SET.  */
1573 gcc_assert (GET_CODE (expr) == SET);
1575 src = SET_SRC (expr);
1576 dest = SET_DEST (expr);
/* If SRC is a register holding a saved copy of some other register,
   treat this as an operation on the original register.  */
1578 if (REG_P (src))
1580 rtx rsi = reg_saved_in (src);
1581 if (rsi)
1582 src = rsi;
1585 fde = cfun->fde;
/* A REG destination updates the CFA bookkeeping (rules 1-9, 15, 16);
   a MEM destination records a register save (rules 10-14, 17-19).  */
1587 switch (GET_CODE (dest))
1589 case REG:
1590 switch (GET_CODE (src))
1592 /* Setting FP from SP. */
1593 case REG:
1594 if (cur_cfa->reg == dwf_regno (src))
1596 /* Rule 1 */
1597 /* Update the CFA rule wrt SP or FP. Make sure src is
1598 relative to the current CFA register.
1600 We used to require that dest be either SP or FP, but the
1601 ARM copies SP to a temporary register, and from there to
1602 FP. So we just rely on the backends to only set
1603 RTX_FRAME_RELATED_P on appropriate insns. */
1604 cur_cfa->reg = dwf_regno (dest);
1605 cur_trace->cfa_temp.reg = cur_cfa->reg;
1606 cur_trace->cfa_temp.offset = cur_cfa->offset;
1608 else
1610 /* Saving a register in a register. */
1611 gcc_assert (!fixed_regs [REGNO (dest)]
1612 /* For the SPARC and its register window. */
1613 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1615 /* After stack is aligned, we can only save SP in FP
1616 if drap register is used. In this case, we have
1617 to restore stack pointer with the CFA value and we
1618 don't generate this DWARF information. */
1619 if (fde
1620 && fde->stack_realign
1621 && REGNO (src) == STACK_POINTER_REGNUM)
1622 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1623 && fde->drap_reg != INVALID_REGNUM
1624 && cur_cfa->reg != dwf_regno (src));
1625 else
1626 queue_reg_save (src, dest, 0);
1628 break;
1630 case PLUS:
1631 case MINUS:
1632 case LO_SUM:
1633 if (dest == stack_pointer_rtx)
1635 /* Rule 2 */
1636 /* Adjusting SP. */
1637 switch (GET_CODE (XEXP (src, 1)))
1639 case CONST_INT:
1640 offset = INTVAL (XEXP (src, 1));
1641 break;
1642 case REG:
1643 gcc_assert (dwf_regno (XEXP (src, 1))
1644 == cur_trace->cfa_temp.reg);
1645 offset = cur_trace->cfa_temp.offset;
1646 break;
1647 default:
1648 gcc_unreachable ();
1651 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1653 /* Restoring SP from FP in the epilogue. */
1654 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1655 cur_cfa->reg = dw_stack_pointer_regnum;
1657 else if (GET_CODE (src) == LO_SUM)
1658 /* Assume we've set the source reg of the LO_SUM from sp. */
1660 else
1661 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1663 if (GET_CODE (src) != MINUS)
1664 offset = -offset;
1665 if (cur_cfa->reg == dw_stack_pointer_regnum)
1666 cur_cfa->offset += offset;
1667 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1668 cur_trace->cfa_store.offset += offset;
1670 else if (dest == hard_frame_pointer_rtx)
1672 /* Rule 3 */
1673 /* Either setting the FP from an offset of the SP,
1674 or adjusting the FP */
1675 gcc_assert (frame_pointer_needed);
1677 gcc_assert (REG_P (XEXP (src, 0))
1678 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1679 && CONST_INT_P (XEXP (src, 1)));
1680 offset = INTVAL (XEXP (src, 1));
1681 if (GET_CODE (src) != MINUS)
1682 offset = -offset;
1683 cur_cfa->offset += offset;
1684 cur_cfa->reg = dw_frame_pointer_regnum;
1686 else
1688 gcc_assert (GET_CODE (src) != MINUS);
1690 /* Rule 4 */
1691 if (REG_P (XEXP (src, 0))
1692 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1693 && CONST_INT_P (XEXP (src, 1)))
1695 /* Setting a temporary CFA register that will be copied
1696 into the FP later on. */
1697 offset = - INTVAL (XEXP (src, 1));
1698 cur_cfa->offset += offset;
1699 cur_cfa->reg = dwf_regno (dest);
1700 /* Or used to save regs to the stack. */
1701 cur_trace->cfa_temp.reg = cur_cfa->reg;
1702 cur_trace->cfa_temp.offset = cur_cfa->offset;
1705 /* Rule 5 */
1706 else if (REG_P (XEXP (src, 0))
1707 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1708 && XEXP (src, 1) == stack_pointer_rtx)
1710 /* Setting a scratch register that we will use instead
1711 of SP for saving registers to the stack. */
1712 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1713 cur_trace->cfa_store.reg = dwf_regno (dest);
1714 cur_trace->cfa_store.offset
1715 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1718 /* Rule 9 */
1719 else if (GET_CODE (src) == LO_SUM
1720 && CONST_INT_P (XEXP (src, 1)))
1722 cur_trace->cfa_temp.reg = dwf_regno (dest);
1723 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1725 else
1726 gcc_unreachable ();
1728 break;
1730 /* Rule 6 */
1731 case CONST_INT:
1732 cur_trace->cfa_temp.reg = dwf_regno (dest);
1733 cur_trace->cfa_temp.offset = INTVAL (src);
1734 break;
1736 /* Rule 7 */
1737 case IOR:
1738 gcc_assert (REG_P (XEXP (src, 0))
1739 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1740 && CONST_INT_P (XEXP (src, 1)));
1742 cur_trace->cfa_temp.reg = dwf_regno (dest);
1743 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1744 break;
1746 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1747 which will fill in all of the bits. */
1748 /* Rule 8 */
1749 case HIGH:
1750 break;
1752 /* Rule 15 */
1753 case UNSPEC:
1754 case UNSPEC_VOLATILE:
1755 /* All unspecs should be represented by REG_CFA_* notes. */
1756 gcc_unreachable ();
1757 return;
1759 /* Rule 16 */
1760 case AND:
1761 /* If this AND operation happens on stack pointer in prologue,
1762 we assume the stack is realigned and we extract the
1763 alignment. */
1764 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1766 /* We interpret reg_save differently with stack_realign set.
1767 Thus we must flush whatever we have queued first. */
1768 dwarf2out_flush_queued_reg_saves ();
1770 gcc_assert (cur_trace->cfa_store.reg
1771 == dwf_regno (XEXP (src, 0)));
1772 fde->stack_realign = 1;
1773 fde->stack_realignment = INTVAL (XEXP (src, 1));
1774 cur_trace->cfa_store.offset = 0;
1776 if (cur_cfa->reg != dw_stack_pointer_regnum
1777 && cur_cfa->reg != dw_frame_pointer_regnum)
1778 fde->drap_reg = cur_cfa->reg;
1780 return;
1782 default:
1783 gcc_unreachable ();
1785 break;
1787 case MEM:
1789 /* Saving a register to the stack. Make sure dest is relative to the
1790 CFA register. */
1791 switch (GET_CODE (XEXP (dest, 0)))
1793 /* Rule 10 */
1794 /* With a push. */
1795 case PRE_MODIFY:
1796 case POST_MODIFY:
1797 /* We can't handle variable size modifications. */
1798 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1799 == CONST_INT);
1800 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1802 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1803 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1805 cur_trace->cfa_store.offset += offset;
1806 if (cur_cfa->reg == dw_stack_pointer_regnum)
1807 cur_cfa->offset = cur_trace->cfa_store.offset;
1809 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1810 offset -= cur_trace->cfa_store.offset;
1811 else
1812 offset = -cur_trace->cfa_store.offset;
1813 break;
1815 /* Rule 11 */
1816 case PRE_INC:
1817 case PRE_DEC:
1818 case POST_DEC:
1819 offset = GET_MODE_SIZE (GET_MODE (dest));
1820 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1821 offset = -offset;
1823 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1824 == STACK_POINTER_REGNUM)
1825 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1827 cur_trace->cfa_store.offset += offset;
1829 /* Rule 18: If stack is aligned, we will use FP as a
1830 reference to represent the address of the stored
1831 register. */
1832 if (fde
1833 && fde->stack_realign
1834 && REG_P (src)
1835 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1837 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1838 cur_trace->cfa_store.offset = 0;
1841 if (cur_cfa->reg == dw_stack_pointer_regnum)
1842 cur_cfa->offset = cur_trace->cfa_store.offset;
1844 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1845 offset += -cur_trace->cfa_store.offset;
1846 else
1847 offset = -cur_trace->cfa_store.offset;
1848 break;
1850 /* Rule 12 */
1851 /* With an offset. */
1852 case PLUS:
1853 case MINUS:
1854 case LO_SUM:
1856 unsigned int regno;
1858 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1859 && REG_P (XEXP (XEXP (dest, 0), 0)));
1860 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1861 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1862 offset = -offset;
1864 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1866 if (cur_cfa->reg == regno)
1867 offset -= cur_cfa->offset;
1868 else if (cur_trace->cfa_store.reg == regno)
1869 offset -= cur_trace->cfa_store.offset;
1870 else
1872 gcc_assert (cur_trace->cfa_temp.reg == regno);
1873 offset -= cur_trace->cfa_temp.offset;
1876 break;
1878 /* Rule 13 */
1879 /* Without an offset. */
1880 case REG:
1882 unsigned int regno = dwf_regno (XEXP (dest, 0));
1884 if (cur_cfa->reg == regno)
1885 offset = -cur_cfa->offset;
1886 else if (cur_trace->cfa_store.reg == regno)
1887 offset = -cur_trace->cfa_store.offset;
1888 else
1890 gcc_assert (cur_trace->cfa_temp.reg == regno);
1891 offset = -cur_trace->cfa_temp.offset;
1894 break;
1896 /* Rule 14 */
1897 case POST_INC:
1898 gcc_assert (cur_trace->cfa_temp.reg
1899 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1900 offset = -cur_trace->cfa_temp.offset;
1901 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1902 break;
1904 default:
1905 gcc_unreachable ();
1908 /* Rule 17 */
1909 /* If the source operand of this MEM operation is a memory,
1910 we only care how much stack grew. */
1911 if (MEM_P (src))
1912 break;
1914 if (REG_P (src)
1915 && REGNO (src) != STACK_POINTER_REGNUM
1916 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1917 && dwf_regno (src) == cur_cfa->reg)
1919 /* We're storing the current CFA reg into the stack. */
1921 if (cur_cfa->offset == 0)
1923 /* Rule 19 */
1924 /* If stack is aligned, putting CFA reg into stack means
1925 we can no longer use reg + offset to represent CFA.
1926 Here we use DW_CFA_def_cfa_expression instead. The
1927 result of this expression equals to the original CFA
1928 value. */
1929 if (fde
1930 && fde->stack_realign
1931 && cur_cfa->indirect == 0
1932 && cur_cfa->reg != dw_frame_pointer_regnum)
1934 gcc_assert (fde->drap_reg == cur_cfa->reg);
1936 cur_cfa->indirect = 1;
1937 cur_cfa->reg = dw_frame_pointer_regnum;
1938 cur_cfa->base_offset = offset;
1939 cur_cfa->offset = 0;
1941 fde->drap_reg_saved = 1;
1942 break;
1945 /* If the source register is exactly the CFA, assume
1946 we're saving SP like any other register; this happens
1947 on the ARM. */
1948 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1949 break;
1951 else
1953 /* Otherwise, we'll need to look in the stack to
1954 calculate the CFA. */
1955 rtx x = XEXP (dest, 0);
1957 if (!REG_P (x))
1958 x = XEXP (x, 0);
1959 gcc_assert (REG_P (x));
1961 cur_cfa->reg = dwf_regno (x);
1962 cur_cfa->base_offset = offset;
1963 cur_cfa->indirect = 1;
1964 break;
/* Queue the save of SRC, piece by piece if the target splits it
   across several hard registers.  */
1968 if (REG_P (src))
1969 span = targetm.dwarf_register_span (src);
1970 else
1971 span = NULL;
1973 if (!span)
1974 queue_reg_save (src, NULL_RTX, offset);
1975 else
1977 /* We have a PARALLEL describing where the contents of SRC live.
1978 Queue register saves for each piece of the PARALLEL. */
1979 HOST_WIDE_INT span_offset = offset;
1981 gcc_assert (GET_CODE (span) == PARALLEL);
1983 const int par_len = XVECLEN (span, 0);
1984 for (int par_index = 0; par_index < par_len; par_index++)
1986 rtx elem = XVECEXP (span, 0, par_index);
1987 queue_reg_save (elem, NULL_RTX, span_offset);
1988 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1991 break;
1993 default:
1994 gcc_unreachable ();
1998 /* Record call frame debugging information for INSN, which either sets
1999 SP or FP (adjusting how we calculate the frame address) or saves a
2000 register to the stack. */
2002 static void
2003 dwarf2out_frame_debug (rtx_insn *insn)
2005 rtx note, n, pat;
2006 bool handled_one = false;
2008 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2009 switch (REG_NOTE_KIND (note))
2011 case REG_FRAME_RELATED_EXPR:
2012 pat = XEXP (note, 0);
2013 goto do_frame_expr;
2015 case REG_CFA_DEF_CFA:
2016 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2017 handled_one = true;
2018 break;
2020 case REG_CFA_ADJUST_CFA:
2021 n = XEXP (note, 0);
2022 if (n == NULL)
2024 n = PATTERN (insn);
2025 if (GET_CODE (n) == PARALLEL)
2026 n = XVECEXP (n, 0, 0);
2028 dwarf2out_frame_debug_adjust_cfa (n);
2029 handled_one = true;
2030 break;
2032 case REG_CFA_OFFSET:
2033 n = XEXP (note, 0);
2034 if (n == NULL)
2035 n = single_set (insn);
2036 dwarf2out_frame_debug_cfa_offset (n);
2037 handled_one = true;
2038 break;
2040 case REG_CFA_REGISTER:
2041 n = XEXP (note, 0);
2042 if (n == NULL)
2044 n = PATTERN (insn);
2045 if (GET_CODE (n) == PARALLEL)
2046 n = XVECEXP (n, 0, 0);
2048 dwarf2out_frame_debug_cfa_register (n);
2049 handled_one = true;
2050 break;
2052 case REG_CFA_EXPRESSION:
2053 n = XEXP (note, 0);
2054 if (n == NULL)
2055 n = single_set (insn);
2056 dwarf2out_frame_debug_cfa_expression (n);
2057 handled_one = true;
2058 break;
2060 case REG_CFA_RESTORE:
2061 n = XEXP (note, 0);
2062 if (n == NULL)
2064 n = PATTERN (insn);
2065 if (GET_CODE (n) == PARALLEL)
2066 n = XVECEXP (n, 0, 0);
2067 n = XEXP (n, 0);
2069 dwarf2out_frame_debug_cfa_restore (n);
2070 handled_one = true;
2071 break;
2073 case REG_CFA_SET_VDRAP:
2074 n = XEXP (note, 0);
2075 if (REG_P (n))
2077 dw_fde_ref fde = cfun->fde;
2078 if (fde)
2080 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2081 if (REG_P (n))
2082 fde->vdrap_reg = dwf_regno (n);
2085 handled_one = true;
2086 break;
2088 case REG_CFA_WINDOW_SAVE:
2089 dwarf2out_frame_debug_cfa_window_save ();
2090 handled_one = true;
2091 break;
2093 case REG_CFA_FLUSH_QUEUE:
2094 /* The actual flush happens elsewhere. */
2095 handled_one = true;
2096 break;
2098 default:
2099 break;
2102 if (!handled_one)
2104 pat = PATTERN (insn);
2105 do_frame_expr:
2106 dwarf2out_frame_debug_expr (pat);
2108 /* Check again. A parallel can save and update the same register.
2109 We could probably check just once, here, but this is safer than
2110 removing the check at the start of the function. */
2111 if (clobbers_queued_reg_save (pat))
2112 dwarf2out_flush_queued_reg_saves ();
2116 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2118 static void
2119 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2121 size_t i, n_old, n_new, n_max;
2122 dw_cfi_ref cfi;
2124 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2125 add_cfi (new_row->cfa_cfi);
2126 else
2128 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2129 if (cfi)
2130 add_cfi (cfi);
2133 n_old = vec_safe_length (old_row->reg_save);
2134 n_new = vec_safe_length (new_row->reg_save);
2135 n_max = MAX (n_old, n_new);
2137 for (i = 0; i < n_max; ++i)
2139 dw_cfi_ref r_old = NULL, r_new = NULL;
2141 if (i < n_old)
2142 r_old = (*old_row->reg_save)[i];
2143 if (i < n_new)
2144 r_new = (*new_row->reg_save)[i];
2146 if (r_old == r_new)
2148 else if (r_new == NULL)
2149 add_cfi_restore (i);
2150 else if (!cfi_equal_p (r_old, r_new))
2151 add_cfi (r_new);
2155 /* Examine CFI and return true if a cfi label and set_loc is needed
2156 beforehand. Even when generating CFI assembler instructions, we
2157 still have to add the cfi to the list so that lookup_cfa_1 works
2158 later on. When -g2 and above we even need to force emitting of
2159 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2160 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2161 and so don't use convert_cfa_to_fb_loc_list. */
2163 static bool
2164 cfi_label_required_p (dw_cfi_ref cfi)
2166 if (!dwarf2out_do_cfi_asm ())
2167 return true;
2169 if (dwarf_version == 2
2170 && debug_info_level > DINFO_LEVEL_TERSE
2171 && (write_symbols == DWARF2_DEBUG
2172 || write_symbols == VMS_AND_DWARF2_DEBUG))
2174 switch (cfi->dw_cfi_opc)
2176 case DW_CFA_def_cfa_offset:
2177 case DW_CFA_def_cfa_offset_sf:
2178 case DW_CFA_def_cfa_register:
2179 case DW_CFA_def_cfa:
2180 case DW_CFA_def_cfa_sf:
2181 case DW_CFA_def_cfa_expression:
2182 case DW_CFA_restore_state:
2183 return true;
2184 default:
2185 return false;
2188 return false;
2191 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2192 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2193 necessary. */
2194 static void
2195 add_cfis_to_fde (void)
2197 dw_fde_ref fde = cfun->fde;
2198 rtx_insn *insn, *next;
2199 /* We always start with a function_begin label. */
2200 bool first = false;
2202 for (insn = get_insns (); insn; insn = next)
2204 next = NEXT_INSN (insn);
2206 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2208 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2209 /* Don't attempt to advance_loc4 between labels
2210 in different sections. */
2211 first = true;
2214 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2216 bool required = cfi_label_required_p (NOTE_CFI (insn));
2217 while (next)
2218 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2220 required |= cfi_label_required_p (NOTE_CFI (next));
2221 next = NEXT_INSN (next);
2223 else if (active_insn_p (next)
2224 || (NOTE_P (next) && (NOTE_KIND (next)
2225 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2226 break;
2227 else
2228 next = NEXT_INSN (next);
2229 if (required)
2231 int num = dwarf2out_cfi_label_num;
2232 const char *label = dwarf2out_cfi_label ();
2233 dw_cfi_ref xcfi;
2234 rtx tmp;
2236 /* Set the location counter to the new label. */
2237 xcfi = new_cfi ();
2238 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2239 : DW_CFA_advance_loc4);
2240 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2241 vec_safe_push (fde->dw_fde_cfi, xcfi);
2243 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2244 NOTE_LABEL_NUMBER (tmp) = num;
2249 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2250 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2251 insn = NEXT_INSN (insn);
2253 while (insn != next);
2254 first = false;
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.

   START is the insn heading the candidate trace; ORIGIN is the branch
   insn that led here, or NULL for a fallthru edge (used only for dumps).
   On the first visit the trace inherits the current row state and is
   queued on TRACE_WORK_LIST; on later visits the incoming state must
   match what was recorded the first time.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      /* Deep-copy the register-save vector: the destination trace must
	 own its state independently of the current trace.  */
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {
      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.

   On such edges the pushed-argument area is torn down by the unwinder,
   so the destination trace must see args_size of zero, and a CFA based
   on the stack pointer must be adjusted accordingly.  The current
   trace's state is saved around the call and restored afterwards.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      /* Nothing pushed; the normal path suffices.  */
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Restore the state of the current trace for the fallthru path.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled below from the CALL that does the
	 actual transfer; the jump itself contributes no edge here.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per entry in the dispatch table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may target any label whose address was taken.  */
	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
	    maybe_record_trace_start (lab->insn (), insn);
	}
      else if (returnjump_p (insn))
	/* Returns leave the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: edges to each label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* An ordinary (conditional) jump with a single target label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot SEQUENCE: recurse on each element; the recursion
	 also handles the EH edges, so return rather than fall through.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.

   Frame-related insns update the CFI row state via dwarf2out_frame_debug;
   every insn is then checked for a REG_ARGS_SIZE effect.  */

static void
scan_insn_after (rtx_insn *insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.

   TRACE's end state is seeded from its beginning state, then each insn
   is processed in order: register-save flushes and CFA updates are
   emitted as NOTE_INSN_CFI notes positioned via the global ADD_CFI_INSN,
   and outgoing edges are propagated with create_trace_edges.  The
   globals CUR_TRACE, CUR_ROW and CUR_CFA are set for the duration and
   cleared on exit.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* Work on a local copy of the CFA so that intermediate adjustments
     (e.g. for annulled delay slots) can be discarded cheaply.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects on the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2617 /* Scan the function and create the initial set of CFI notes. */
2619 static void
2620 create_cfi_notes (void)
2622 dw_trace_info *ti;
2624 gcc_checking_assert (!queued_reg_saves.exists ());
2625 gcc_checking_assert (!trace_work_list.exists ());
2627 /* Always begin at the entry trace. */
2628 ti = &trace_info[0];
2629 scan_trace (ti);
2631 while (!trace_work_list.is_empty ())
2633 ti = trace_work_list.pop ();
2634 scan_trace (ti);
2637 queued_reg_saves.release ();
2638 trace_work_list.release ();
2641 /* Return the insn before the first NOTE_INSN_CFI after START. */
2643 static rtx_insn *
2644 before_next_cfi_note (rtx_insn *start)
2646 rtx_insn *prev = start;
2647 while (start)
2649 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2650 return prev;
2651 prev = start;
2652 start = NEXT_INSN (start);
2654 gcc_unreachable ();
/* Insert CFI notes between traces to properly change state between them.

   After every reachable trace has been scanned, the row state at the
   end of one trace may differ from the recorded state at the head of
   the next.  Emit the CFI opcodes (including remember/restore_state
   pairs for the common epilogue-sharing case) needed to reconcile
   them, and patch up args_size across EH regions.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (text section switch) restarts at zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2784 /* Set up the pseudo-cfg of instruction traces, as described at the
2785 block comment at the top of the file. */
2787 static void
2788 create_pseudo_cfg (void)
2790 bool saw_barrier, switch_sections;
2791 dw_trace_info ti;
2792 rtx_insn *insn;
2793 unsigned i;
2795 /* The first trace begins at the start of the function,
2796 and begins with the CIE row state. */
2797 trace_info.create (16);
2798 memset (&ti, 0, sizeof (ti));
2799 ti.head = get_insns ();
2800 ti.beg_row = cie_cfi_row;
2801 ti.cfa_store = cie_cfi_row->cfa;
2802 ti.cfa_temp.reg = INVALID_REGNUM;
2803 trace_info.quick_push (ti);
2805 if (cie_return_save)
2806 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2808 /* Walk all the insns, collecting start of trace locations. */
2809 saw_barrier = false;
2810 switch_sections = false;
2811 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2813 if (BARRIER_P (insn))
2814 saw_barrier = true;
2815 else if (NOTE_P (insn)
2816 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2818 /* We should have just seen a barrier. */
2819 gcc_assert (saw_barrier);
2820 switch_sections = true;
2822 /* Watch out for save_point notes between basic blocks.
2823 In particular, a note after a barrier. Do not record these,
2824 delaying trace creation until the label. */
2825 else if (save_point_p (insn)
2826 && (LABEL_P (insn) || !saw_barrier))
2828 memset (&ti, 0, sizeof (ti));
2829 ti.head = insn;
2830 ti.switch_sections = switch_sections;
2831 ti.id = trace_info.length ();
2832 trace_info.safe_push (ti);
2834 saw_barrier = false;
2835 switch_sections = false;
2839 /* Create the trace index after we've finished building trace_info,
2840 avoiding stale pointer problems due to reallocation. */
2841 trace_index
2842 = new hash_table<trace_info_hasher> (trace_info.length ());
2843 dw_trace_info *tp;
2844 FOR_EACH_VEC_ELT (trace_info, i, tp)
2846 dw_trace_info **slot;
2848 if (dump_file)
2849 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2850 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2851 tp->switch_sections ? " (section switch)" : "");
2853 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2854 gcc_assert (*slot == NULL);
2855 *slot = tp;
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.

   Depending on the target the incoming return address may live in a
   register (REG), in a stack slot addressed relative to the stack
   pointer (MEM), or at a constant offset from a loadable value (PLUS,
   e.g. SPARC's %i7+8, where the offset is irrelevant for unwinding).
   Emit the corresponding save for DWARF_FRAME_RETURN_COLUMN.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA lives somewhere other than its own column: record the
	 register alias (if any) and the save against the return column,
	 expressed relative to the current CFA.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
/* Compute the CIE row state shared by all FDEs: CFA at SP plus
   INCOMING_FRAME_SP_OFFSET, and (when DWARF2 unwinding is in use) the
   initial location of the return address.  The resulting CFI vector is
   accumulated into CIE_CFI_VEC; any return-address-in-register mapping
   is preserved in CIE_RETURN_SAVE for per-function re-initialization.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* Use a scratch trace so the helpers below have a CUR_TRACE to
     record into; it never enters trace_info.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.

   Returns 0 (no additional TODO flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state.
     The CIE data is shared by every function in the translation unit.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3010 /* Convert a DWARF call frame info. operation to its string name */
3012 static const char *
3013 dwarf_cfi_name (unsigned int cfi_opc)
3015 const char *name = get_DW_CFA_name (cfi_opc);
3017 if (name != NULL)
3018 return name;
3020 return "DW_CFA_<unknown>";
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.

   CFI is a DW_CFA_expression or DW_CFA_def_cfa_expression entry;
   FOR_EH selects EH-frame register numbering.  For DW_CFA_expression
   the target register precedes the expression block.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
/* Similar, but used for .cfi_escape.

   Emits the same bytes as output_cfa_loc but as comma-separated raw
   data suitable for appending to a .cfi_escape directive; always uses
   EH register numbering.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
/* Output a Call Frame Information opcode and its operand(s).

   CFI is the entry to emit; FDE supplies (and is updated with) the
   current label used for advance_loc deltas; FOR_EH selects EH-frame
   register numbering and pointer encoding.  The three opcodes that
   embed an operand in their low six bits (advance_loc, offset,
   restore) are handled specially; everything else emits the opcode
   byte followed by its ULEB/SLEB/address operands.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* Opcode byte alone; no operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3218 /* Similar, but do it via assembler directives instead. */
3220 void
3221 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3223 unsigned long r, r2;
3225 switch (cfi->dw_cfi_opc)
3227 case DW_CFA_advance_loc:
3228 case DW_CFA_advance_loc1:
3229 case DW_CFA_advance_loc2:
3230 case DW_CFA_advance_loc4:
3231 case DW_CFA_MIPS_advance_loc8:
3232 case DW_CFA_set_loc:
3233 /* Should only be created in a code path not followed when emitting
3234 via directives. The assembler is going to take care of this for
3235 us. But this routines is also used for debugging dumps, so
3236 print something. */
3237 gcc_assert (f != asm_out_file);
3238 fprintf (f, "\t.cfi_advance_loc\n");
3239 break;
3241 case DW_CFA_offset:
3242 case DW_CFA_offset_extended:
3243 case DW_CFA_offset_extended_sf:
3244 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3245 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3246 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3247 break;
3249 case DW_CFA_restore:
3250 case DW_CFA_restore_extended:
3251 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3252 fprintf (f, "\t.cfi_restore %lu\n", r);
3253 break;
3255 case DW_CFA_undefined:
3256 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3257 fprintf (f, "\t.cfi_undefined %lu\n", r);
3258 break;
3260 case DW_CFA_same_value:
3261 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3262 fprintf (f, "\t.cfi_same_value %lu\n", r);
3263 break;
3265 case DW_CFA_def_cfa:
3266 case DW_CFA_def_cfa_sf:
3267 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3268 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3269 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3270 break;
3272 case DW_CFA_def_cfa_register:
3273 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3274 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3275 break;
3277 case DW_CFA_register:
3278 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3279 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3280 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3281 break;
3283 case DW_CFA_def_cfa_offset:
3284 case DW_CFA_def_cfa_offset_sf:
3285 fprintf (f, "\t.cfi_def_cfa_offset "
3286 HOST_WIDE_INT_PRINT_DEC"\n",
3287 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3288 break;
3290 case DW_CFA_remember_state:
3291 fprintf (f, "\t.cfi_remember_state\n");
3292 break;
3293 case DW_CFA_restore_state:
3294 fprintf (f, "\t.cfi_restore_state\n");
3295 break;
3297 case DW_CFA_GNU_args_size:
3298 if (f == asm_out_file)
3300 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3301 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3302 if (flag_debug_asm)
3303 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3304 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3305 fputc ('\n', f);
3307 else
3309 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
3310 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3312 break;
3314 case DW_CFA_GNU_window_save:
3315 fprintf (f, "\t.cfi_window_save\n");
3316 break;
3318 case DW_CFA_def_cfa_expression:
3319 if (f != asm_out_file)
3321 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3322 break;
3324 /* FALLTHRU */
3325 case DW_CFA_expression:
3326 if (f != asm_out_file)
3328 fprintf (f, "\t.cfi_cfa_expression ...\n");
3329 break;
3331 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3332 output_cfa_loc_raw (cfi);
3333 fputc ('\n', f);
3334 break;
3336 default:
3337 gcc_unreachable ();
3341 void
3342 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3344 if (dwarf2out_do_cfi_asm ())
3345 output_cfi_directive (asm_out_file, cfi);
/* Dump the complete CFI state of ROW to F as .cfi_* directive text:
   first the CFA definition (synthesized if the row carries no cached
   CFA cfi), then every recorded register-save entry.  */

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      /* Rebuild a def_cfa entry from the row's CFA location; the dummy
	 "previous" location forces def_cfa_0 to emit the full form.  */
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}
/* Entry point for use from the debugger: dump ROW to stderr.  */
void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3378 /* Save the result of dwarf2out_do_frame across PCH.
3379 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3380 static GTY(()) signed char saved_do_cfi_asm = 0;
/* Decide whether we want to emit frame unwind information for the current
   translation unit.

   True when DWARF2 debug info is requested, when directive-based CFI
   has already been committed to (SAVED_DO_CFI_ASM), or when the target
   uses DWARF2 for debug or exception unwinding.  */

bool
dwarf2out_do_frame (void)
{
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return true;

  if (saved_do_cfi_asm > 0)
    return true;

  if (targetm.debug_unwind_info () == UI_DWARF2)
    return true;

  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    return true;

  return false;
}
/* Decide whether to emit frame unwind via assembler directives.

   The result is computed once and cached in SAVED_DO_CFI_ASM (which
   also survives PCH); it requires assembler support for the .cfi
   directives and a personality encoding the assembler can represent.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
namespace {

/* Pass descriptor: runs unconditionally (no TODO flags) and is timed
   under TV_FINAL alongside final assembly output.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The RTL pass that annotates each function with NOTE_INSN_CFI notes
   via execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
/* Factory for the dwarf2 frame pass; caller takes ownership.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3497 #include "gt-dwarf2cfi.h"